source
stringlengths
3
92
c
stringlengths
26
2.25M
kvstore_dist_server.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file mxnet_node.h * \brief implement mxnet nodes */ #ifndef MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_ #define MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_ #include <mxnet/c_api.h> #include <mxnet/kvstore.h> #include <ps/ps.h> #include <queue> #include <string> #include <mutex> #include <condition_variable> #include <memory> #include <functional> #include <future> #include <vector> #include "../profiler/profiler.h" #include "../operator/tensor/elemwise_binary_op-inl.h" #include "../operator/tensor/init_op.h" namespace mxnet { namespace kvstore { // maintain same order in frontend. enum class CommandType { kController, kSetMultiPrecision, kStopServer, kSyncMode, kSetGradientCompression, kSetProfilerParams }; enum class RequestType { kDefaultPushPull, kRowSparsePushPull, kCompressedPushPull }; struct DataHandleType { RequestType requestType; int dtype; }; /*! * Uses Cantor pairing function to generate a unique number given two numbers. * This number can also be inverted to find the unique pair whose Cantor value is this number. 
* Ref: https://en.wikipedia.org/wiki/Pairing_function#Cantor_pairing_function * \param requestType RequestType * \param dtype integer * \return Cantor value of arguments */ static int GetCommandType(RequestType requestType, int d) { int m = static_cast<int>(requestType); return (((m + d) * (m + d + 1)) / 2) + d; } /*! * Unpairs Cantor value and finds the two integers used to pair. * Then returns DataHandleType object with those numbers. * \param cmd DataHandleCommand generated by GetCommandType function * \return DataHandleType */ static DataHandleType DepairDataHandleType(int cmd) { int w = std::floor((std::sqrt(8 * cmd + 1) - 1)/2); int t = ((w * w) + w) / 2; int y = cmd - t; int x = w - y; CHECK_GE(x, 0); CHECK_GE(y, 0); DataHandleType type; type.requestType = static_cast<RequestType>(x); type.dtype = y; return type; } /** * \brief executor runs a function using the thread called \ref Start */ class Executor { public: /** * \brief start the executor */ void Start() { std::unique_lock<std::mutex> lk(mu_); while (true) { cond_.wait(lk, [this]{return !queue_.empty();}); Block blk = std::move(queue_.front()); queue_.pop(); lk.unlock(); if (blk.f) { blk.f(); blk.p->set_value(); } else { blk.p->set_value(); break; } lk.lock(); } } /** * \brief function */ typedef std::function<void()> Func; /** * \brief let the thread called \ref Start to exec a function. 
threadsafe */ void Exec(const Func& func) { Block blk(func); auto fut = blk.p->get_future(); { std::lock_guard<std::mutex> lk(mu_); queue_.push(std::move(blk)); cond_.notify_one(); } fut.wait(); } /** * \brief stop the thread, threadsafe */ void Stop() { Exec(Func()); } private: struct Block { explicit Block(const Func& func) : f(func), p(std::make_shared<std::promise<void>>()) { } Func f; std::shared_ptr<std::promise<void>> p; }; std::queue<Block> queue_; std::mutex mu_; std::condition_variable cond_; }; class KVStoreDistServer { public: KVStoreDistServer() { using namespace std::placeholders; ps_server_ = new ps::KVServer<char>(0); static_cast<ps::SimpleApp*>(ps_server_)->set_request_handle( std::bind(&KVStoreDistServer::CommandHandle, this, _1, _2)); ps_server_->set_request_handle( std::bind(&KVStoreDistServer::DataHandleEx, this, _1, _2, _3)); sync_mode_ = false; gradient_compression_ = std::make_shared<GradientCompression>(); log_verbose_ = dmlc::GetEnv("MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE", false); } ~KVStoreDistServer() { profiler::Profiler::Get()->SetState(profiler::Profiler::ProfilerState(0)); delete ps_server_; } void set_controller(const KVStore::Controller& controller) { CHECK(controller); controller_ = controller; } void set_updater(const KVStore::Updater& updater) { CHECK(updater); updater_ = updater; } /** * \brief blocked until received the command \a kSyncMode */ void Run() { exec_.Start(); } private: struct UpdateBuf { std::vector<ps::KVMeta> request; NDArray merged; // temp_array is used to cast received values as float32 for computation if required NDArray temp_array; }; void CommandHandle(const ps::SimpleData& recved, ps::SimpleApp* app) { CommandType recved_type = static_cast<CommandType>(recved.head); switch (recved_type) { case CommandType::kStopServer: exec_.Stop(); break; case CommandType::kSyncMode: sync_mode_ = true; break; case CommandType::kSetGradientCompression: gradient_compression_->DecodeParams(recved.body); break; case 
CommandType::kSetProfilerParams: // last char is the type of profiler command ProcessServerProfilerCommands(static_cast<KVStoreServerProfilerCommand> (recved.body.back() - '0'), recved.body); break; case CommandType::kSetMultiPrecision: // uses value 1 for message id from frontend if (!multi_precision_) { multi_precision_ = true; CreateMultiPrecisionCopies(); } break; case CommandType::kController: // this uses value 0 for message id from frontend // let the main thread to execute ctrl, which is necessary for python exec_.Exec([this, recved]() { CHECK(controller_); controller_(recved.head, recved.body); }); break; } app->Response(recved); } /* * For keys already initialized, if necessary create stored_realt. * This will only be used if by some wrong usage of kvstore, * some keys are initialized before optimizer is set. */ void CreateMultiPrecisionCopies() { for (auto const &stored_entry : store_) { const int key = stored_entry.first; const NDArray &stored = stored_entry.second; if (stored.dtype() != mshadow::kFloat32) { auto &stored_realt = store_realt_[key]; if (stored.storage_type() == kRowSparseStorage) { stored_realt = NDArray(kRowSparseStorage, stored.shape(), stored.ctx(), true, mshadow::kFloat32); } else { stored_realt = NDArray(stored.shape(), stored.ctx(), false, mshadow::kFloat32); } auto &update = update_buf_[key]; if (!update.merged.is_none()) { if (update.merged.storage_type() == kRowSparseStorage) { update.merged = NDArray(kRowSparseStorage, update.merged.shape(), update.merged.ctx(), true, mshadow::kFloat32); } else { update.merged = NDArray(update.merged.shape(), update.merged.ctx(), false, mshadow::kFloat32); } } CHECK(update.request.size() == 0) << ps::MyRank() << "Multiprecision mode can not be set while pushes are underway." << "Please set optimizer before pushing keys." 
<< key << " " << update.request.size(); CopyFromTo(stored, stored_realt); } } for (auto const &stored_realt_entry : store_realt_) { stored_realt_entry.second.WaitToRead(); } } void ProcessServerProfilerCommands(KVStoreServerProfilerCommand type, const std::string& body) { switch (type) { case KVStoreServerProfilerCommand::kSetConfig: SetProfilerConfig(body.substr(0, body.size() - 1)); break; case KVStoreServerProfilerCommand::kState: MXSetProfilerState(static_cast<int>(body.front() - '0')); break; case KVStoreServerProfilerCommand::kPause: MXProfilePause(static_cast<int>(body.front() - '0')); break; case KVStoreServerProfilerCommand::kDump: MXDumpProfile(static_cast<int>(body.front() - '0')); break; } } void SetProfilerConfig(std::string params_str) { std::vector<std::string> elems; mxnet::kvstore::split(params_str, ',', std::back_inserter(elems)); std::vector<const char*> ckeys; std::vector<const char*> cvals; ckeys.reserve(elems.size()); cvals.reserve(elems.size()); for (size_t i=0; i < elems.size(); i++) { std::vector<std::string> parts; mxnet::kvstore::split(elems[i], ':', std::back_inserter(parts)); CHECK_EQ(parts.size(), 2) << "Improper profiler config passed from worker"; CHECK(!parts[0].empty()) << "ProfilerConfig parameter is empty"; CHECK(!parts[1].empty()) << "ProfilerConfig value is empty for parameter "<< parts[0]; if (parts[0] == "filename") { parts[1] = "rank" + std::to_string(ps::MyRank()) + "_" + parts[1]; } char* ckey = new char[parts[0].length() + 1]; std::snprintf(ckey, parts[0].length() + 1, "%s", parts[0].c_str()); ckeys.push_back(ckey); char* cval = new char[parts[1].length() + 1]; std::snprintf(cval, parts[1].length() + 1, "%s", parts[1].c_str()); cvals.push_back(cval); } MXSetProfilerConfig(elems.size(), &ckeys[0], &cvals[0]); for (size_t i=0; i < ckeys.size(); i++) { delete[] ckeys[i]; delete[] cvals[i]; } } void DataHandleEx(const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { DataHandleType type = 
DepairDataHandleType(req_meta.cmd); switch (type.requestType) { case RequestType::kRowSparsePushPull: DataHandleRowSparse(type, req_meta, req_data, server); break; case RequestType::kCompressedPushPull: DataHandleCompressed(type, req_meta, req_data, server); break; case RequestType::kDefaultPushPull: DataHandleDefault(type, req_meta, req_data, server); break; } } inline bool has_multi_precision_copy(const DataHandleType type) { return multi_precision_ && type.dtype != mshadow::kFloat32; } inline void ApplyUpdates(const DataHandleType type, const int key, const ps::KVPairs<char>& req_data, UpdateBuf *update_buf, ps::KVServer<char>* server) { if (!sync_mode_ || update_buf->request.size() == (size_t) ps::NumWorkers()) { // let the main thread to execute updater_, which is necessary for python auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key]; auto& update = sync_mode_ ? update_buf->merged : update_buf->temp_array; if (updater_) { exec_.Exec([this, key, &update, &stored](){ CHECK(updater_); updater_(key, update, &stored); }); } else { CHECK(sync_mode_) << "Updater needs to be set for async mode"; // if no updater, just copy CopyFromTo(update_buf->merged, &stored); } if (log_verbose_) { LOG(INFO) << "sent response to " << update_buf->request.size() << " workers"; } for (const auto& req : update_buf->request) { /** * Request can be for either push, pull or pushpull * If pull flag is set, respond immediately with the updated values * Otherwise, only send the notification */ if (req.pull) { DefaultStorageResponse(type, key, req, req_data, server); } else { server->Response(req); } } update_buf->request.clear(); if (has_multi_precision_copy(type)) CopyFromTo(stored, store_[key]); stored.WaitToRead(); } else { update_buf->merged.WaitToRead(); } } void DecodeRowIds(const ps::SArray<ps::Key> &keys, int64_t *indices, const int64_t master_key, const int64_t num_rows) { indices[0] = 0; for (int64_t i = 1; i <= num_rows; i++) { int key = 
DecodeKey(keys[i]); auto row_id = key - master_key; indices[i - 1] = row_id; } } void AccumulateRowSparseGrads(const DataHandleType type, const NDArray& recved, UpdateBuf* updateBuf) { NDArray out(kRowSparseStorage, updateBuf->merged.shape(), Context(), true, has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype); if (has_multi_precision_copy(type)) CopyFromTo(recved, updateBuf->temp_array); const NDArray& to_merge = has_multi_precision_copy(type) ? updateBuf->temp_array : recved; // accumulate row_sparse gradients using namespace mshadow; Engine::Get()->PushAsync( [to_merge, updateBuf, out](RunContext ctx, Engine::CallbackOnComplete on_complete) { op::ElemwiseBinaryOp::ComputeEx<cpu, op::mshadow_op::plus>( {}, {}, {to_merge, updateBuf->merged}, {kWriteTo}, {out}); on_complete(); }, to_merge.ctx(), {to_merge.var(), updateBuf->merged.var()}, {out.var()}, FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME); CopyFromTo(out, &(updateBuf->merged), 0); updateBuf->merged.WaitToRead(); } void RowSparsePullResponse(const DataHandleType type, const int master_key, const size_t num_rows, const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { if (log_verbose_) LOG(INFO) << "pull: " << master_key; ps::KVPairs<char> response; if (num_rows == 0) { std::vector<int> lens(req_data.keys.size(), 0); response.keys = req_data.keys; response.lens.CopyFrom(lens.begin(), lens.end()); server->Response(req_meta, response); return; } const NDArray& stored = store_[master_key]; if (has_multi_precision_copy(type)) stored.WaitToRead(); CHECK(!stored.is_none()) << "init " << master_key << " first"; auto shape = stored.shape(); auto unit_len = shape.ProdShape(1, shape.ndim()); const int num_bytes = mshadow::mshadow_sizeof(type.dtype); const int unit_size = unit_len * num_bytes; const char* data = static_cast<char *> (stored.data().dptr_); auto len = num_rows * unit_size; // concat values response.vals.resize(len); #pragma omp parallel for for (size_t 
i = 1; i <= num_rows; i++) { int key = DecodeKey(req_data.keys[i]); int64_t row_id = key - master_key; const auto src = data + row_id * unit_size; auto begin = (i - 1) * unit_size; auto end = i * unit_size; response.vals.segment(begin, end).CopyFrom(src, unit_size); } // setup response response.keys = req_data.keys; std::vector<int> lens(req_data.keys.size(), unit_len); lens[0] = 0; response.lens.CopyFrom(lens.begin(), lens.end()); server->Response(req_meta, response); } void InitRowSparseStored(const DataHandleType type, const int master_key, const size_t num_rows, const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { auto& stored = has_multi_precision_copy(type) ? store_realt_[master_key] : store_[master_key]; int dtype = type.dtype; int num_bytes = mshadow::mshadow_sizeof(dtype); auto unit_len = req_data.lens[1] / num_bytes; CHECK_GT(unit_len, 0); size_t ds[] = {num_rows, (size_t) unit_len}; mxnet::TShape dshape(ds, ds + 2); CHECK_EQ(req_data.vals.size(), num_rows * unit_len * num_bytes); TBlob recv_blob; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask); }) NDArray recved = NDArray(recv_blob, 0); stored = NDArray(kRowSparseStorage, dshape, Context(), true, has_multi_precision_copy(type) ? 
mshadow::kFloat32 : type.dtype); if (has_multi_precision_copy(type)) { store_[master_key] = NDArray(kRowSparseStorage, dshape, Context(), true, type.dtype); } Engine::Get()->PushAsync( [this, recved, stored, type](RunContext ctx, Engine::CallbackOnComplete on_complete) { NDArray rsp = stored; stored.CheckAndAlloc({mshadow::Shape1(recved.shape()[0])}); mshadow::Stream<cpu> *s = ctx.get_stream<cpu>(); using namespace mxnet::op; nnvm::dim_t nnr = rsp.shape()[0]; MSHADOW_IDX_TYPE_SWITCH(rsp.aux_type(rowsparse::kIdx), IType, { IType* idx = rsp.aux_data(rowsparse::kIdx).dptr<IType>(); mxnet_op::Kernel<PopulateFullIdxRspKernel, cpu>::Launch(s, nnr, idx); }); TBlob rsp_data = rsp.data(); // copies or casts as appropriate ndarray::Copy<cpu, cpu>(recved.data(), &rsp_data, Context(), Context(), RunContext()); on_complete(); }, recved.ctx(), {recved.var()}, {stored.var()}, FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME); if (has_multi_precision_copy(type)) { CopyFromTo(stored, store_[master_key]); store_[master_key].WaitToRead(); } stored.WaitToRead(); server->Response(req_meta); } void DataHandleRowSparse(const DataHandleType type, const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { int master_key = DecodeKey(req_data.keys[0]); auto num_rows = req_data.keys.size() - 1; auto& stored = store_[master_key]; if (req_meta.push) { CHECK_GT(req_data.lens.size(), 0) << "req_data.lens cannot be empty"; CHECK_EQ(req_data.lens[0], 0); if (stored.is_none()) { if (log_verbose_) LOG(INFO) << "initial push: " << master_key; // initialization CHECK_GT(num_rows, 0) << "init with empty data is not supported"; InitRowSparseStored(type, master_key, num_rows, req_meta, req_data, server); return; } else { if (log_verbose_) LOG(INFO) << "push: " << master_key << " " << req_data.keys; auto& updates = update_buf_[master_key]; if (sync_mode_ && updates.merged.is_none()) { updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(), true, 
has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype); } if (has_multi_precision_copy(type) && updates.temp_array.is_none()) { updates.temp_array = NDArray(kRowSparseStorage, stored.shape(), Context(), false, mshadow::kFloat32); } if (num_rows == 0) { if (sync_mode_) { if (updates.request.empty()) { // reset to zeros int merged_dtype = has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype; updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(), true, merged_dtype); } // else nothing to aggregate updates.request.push_back(req_meta); ApplyUpdates(type, master_key, req_data, &updates, server); } else { server->Response(req_meta); } } else { auto unit_len = req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype); CHECK_GT(unit_len, 0); // indices std::vector<int64_t> indices(num_rows); DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows); // data TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask); size_t ds[] = {(size_t) num_rows, (size_t) unit_len}; mxnet::TShape dshape(ds, ds + 2); TBlob recv_blob; MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, { recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask); }) // row_sparse NDArray NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0); if (updates.request.empty()) { if (sync_mode_) { CopyFromTo(recved, updates.merged); } else { if (has_multi_precision_copy(type)) { CopyFromTo(recved, updates.temp_array); } else { updates.temp_array = recved; } } } else { CHECK(sync_mode_); AccumulateRowSparseGrads(type, recved, &updates); } updates.request.push_back(req_meta); ApplyUpdates(type, master_key, req_data, &updates, server); } } } else { // pull RowSparsePullResponse(type, master_key, num_rows, req_meta, req_data, server); } } void DefaultStorageResponse(const DataHandleType type, const int key, const ps::KVMeta& req_meta, const ps::KVPairs<char> &req_data, ps::KVServer<char>* server) { ps::KVPairs<char> 
response; const NDArray& stored = store_[key]; CHECK(!stored.is_none()) << "init " << key << " first"; // as server returns when store_realt is ready in this case if (has_multi_precision_copy(type)) stored.WaitToRead(); auto len = stored.shape().Size() * mshadow::mshadow_sizeof(stored.dtype()); response.keys = req_data.keys; response.lens = {len}; // TODO(mli) try to remove this CopyFrom response.vals.CopyFrom(static_cast<const char*>(stored.data().dptr_), len); server->Response(req_meta, response); } void DataHandleCompressed(const DataHandleType type, const ps::KVMeta& req_meta, const ps::KVPairs<char> &req_data, ps::KVServer<char>* server) { CHECK_EQ(type.dtype, mshadow::kFloat32) << "Gradient compression is currently supported for fp32 only"; if (req_meta.push) { // there used several WaitToRead, this is because \a recved's memory // could be deallocated when this function returns. so we need to make sure // the operators with \a NDArray are actually finished // first for dummy key which represents original size of array, whose len is 0 CHECK_EQ(req_data.keys.size(), (size_t)2); CHECK_EQ(req_data.lens.size(), (size_t)2); CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[1]); int original_size = DecodeKey(req_data.keys[0]); int key = DecodeKey(req_data.keys[1]); auto& stored = store_[key]; size_t ds[] = {(size_t)req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype)}; mxnet::TShape dshape(ds, ds + 1); TBlob recv_blob(reinterpret_cast<real_t*>(req_data.vals.data()), dshape, cpu::kDevMask); NDArray recved = NDArray(recv_blob, 0); NDArray decomp_buf = decomp_buf_[key]; dshape = mxnet::TShape{(int64_t) original_size}; if (decomp_buf.is_none()) { decomp_buf = NDArray(dshape, Context()); } if (stored.is_none()) { stored = NDArray(dshape, Context()); gradient_compression_->Dequantize(recved, &stored, 0); server->Response(req_meta); stored.WaitToRead(); } else if (sync_mode_) { // synced push auto& merged = update_buf_[key]; if (merged.merged.is_none()) { 
merged.merged = NDArray(dshape, Context()); } if (merged.request.size() == 0) { gradient_compression_->Dequantize(recved, &merged.merged, 0); } else { gradient_compression_->Dequantize(recved, &decomp_buf, 0); merged.merged += decomp_buf; } merged.request.push_back(req_meta); ApplyUpdates(type, key, req_data, &merged, server); } else { // async push gradient_compression_->Dequantize(recved, &decomp_buf, 0); exec_.Exec([this, key, &decomp_buf, &stored]() { CHECK(updater_); updater_(key, decomp_buf, &stored); }); server->Response(req_meta); stored.WaitToRead(); } } else { // pull CHECK_EQ(req_data.keys.size(), (size_t)1); CHECK_EQ(req_data.lens.size(), (size_t)0); int key = DecodeKey(req_data.keys[0]); DefaultStorageResponse(type, key, req_meta, req_data, server); } } void DataHandleDefault(const DataHandleType type, const ps::KVMeta& req_meta, const ps::KVPairs<char> &req_data, ps::KVServer<char>* server) { // do some check CHECK_EQ(req_data.keys.size(), (size_t)1); if (req_meta.push) { CHECK_EQ(req_data.lens.size(), (size_t)1); CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]); } int key = DecodeKey(req_data.keys[0]); auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key]; // there used several WaitToRead, this is because \a recved's memory // could be deallocated when this function returns. so we need to make sure // the operators with \a NDArray are actually finished if (req_meta.push) { size_t ds[] = {(size_t) req_data.lens[0] / mshadow::mshadow_sizeof(type.dtype)}; mxnet::TShape dshape(ds, ds + 1); TBlob recv_blob; MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, { recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask); }) NDArray recved = NDArray(recv_blob, 0); if (stored.is_none()) { // initialization stored = NDArray(dshape, Context(), false, has_multi_precision_copy(type) ? 
mshadow::kFloat32 : type.dtype); CopyFromTo(recved, &stored, 0); server->Response(req_meta); if (has_multi_precision_copy(type)) { auto& stored_dtype = store_[key]; stored_dtype = NDArray(dshape, Context(), false, type.dtype); CopyFromTo(stored, stored_dtype); stored_dtype.WaitToRead(); } stored.WaitToRead(); } else { auto &updates = update_buf_[key]; if (sync_mode_ && updates.merged.is_none()) { updates.merged = NDArray(dshape, Context(), false, has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype); } if (has_multi_precision_copy(type) && updates.temp_array.is_none()) { updates.temp_array = NDArray(dshape, Context(), false, mshadow::kFloat32); } if (updates.request.empty()) { if (sync_mode_) { CopyFromTo(recved, updates.merged); } else { if (has_multi_precision_copy(type)) { CopyFromTo(recved, updates.temp_array); } else { updates.temp_array = recved; } } } else { CHECK(sync_mode_); if (has_multi_precision_copy(type)) { CopyFromTo(recved, updates.temp_array); updates.merged += updates.temp_array; } else { updates.merged += recved; } } updates.request.push_back(req_meta); ApplyUpdates(type, key, req_data, &updates, server); } } else { DefaultStorageResponse(type, key, req_meta, req_data, server); } } int DecodeKey(ps::Key key) { auto kr = ps::Postoffice::Get()->GetServerKeyRanges()[ps::MyRank()]; return key - kr.begin(); } /** * \brief user defined mode for push */ bool sync_mode_; KVStore::Controller controller_; KVStore::Updater updater_; /** * \brief store_ contains the value at kvstore for each key */ std::unordered_map<int, NDArray> store_; std::unordered_map<int, NDArray> store_realt_; /** * \brief merge_buf_ is a buffer used if sync_mode is true. It represents * values from different workers being merged. The store will be updated * to this value when values from all workers are pushed into this buffer. 
*/ std::unordered_map<int, UpdateBuf> update_buf_; /** * \brief decomp_buf_ is a buffer into which compressed values are * decompressed before merging to the store. used when compress_!='none' */ std::unordered_map<int, NDArray> decomp_buf_; Executor exec_; ps::KVServer<char>* ps_server_; // whether to LOG verbose information bool log_verbose_; /* * \brief whether to use multi precision mode. * in multi precision mode, all weights are stored as float32. * any gradient received will be cast to float32 before accumulation and updating of weights. */ bool multi_precision_; /** * \brief gradient compression object. * starts with none, used after SetGradientCompression sets the type * currently there is no support for unsetting gradient compression */ std::shared_ptr<kvstore::GradientCompression> gradient_compression_; }; } // namespace kvstore } // namespace mxnet #endif // MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
merge_omp.c
#include <pthread.h> #include <stdio.h> #include <stdlib.h> #define DEBUG 0 struct merge_data { int i; int j; int *data; }; void mergesort(void *data) { struct merge_data *value = (struct merge_data*)data; struct merge_data right_side; struct merge_data left_side; int i, j, k, middle, vector_len; middle = (int) (value->i+value->j)/2; vector_len = (value->j-value->i); if (DEBUG) { printf("ENTREI NO MERGE %d %d %d -- Meu vetor: ", value->i, value->j, middle); for(i=0 ; i <= (value->j - value->i) ; i++) { printf("%d ", value->data[i]); } printf("\n"); } // omp_set_num_threads(2); // printf(" vector_len = %d total_threads = %d\n", vector_len, omp_get_num_threads()); if(vector_len >= 1) { /*Dividi o programa em duas seções. Cada seção explora a ordenação de metade do vetor. Dessa forma, temos duas threads trabalhando o tempo todo, cada uma em uma metade.*/ #pragma omp parallel sections { #pragma omp section { left_side.data = (int *) malloc(sizeof(int) * (middle - value->i + 1)); left_side.i = 0; left_side.j = (middle - value->i); /*Tentei paralelizar esses loops, porem pelo que encontrei (http://stackoverflow.com/questions/17575329/invalid-controlling-predicate-compiler-error-using-openmp) o openmp não dá suporte a loops com mais de uma variavel de controle. 
Por isso comentei o uso dessas diretivas*/ // #pragma omp parallel for for(i=value->i, j=0 ; i <=middle ; i++, j++) { left_side.data[j] = value->data[i]; } if (DEBUG) { printf("VETOR left_side: "); for(i=0 ; i < (middle - value->i + 1) ; i++) { printf("%d ", left_side.data[i]); } printf("\n"); } mergesort((void *)&left_side); } #pragma omp section { right_side.data = (int *) malloc(sizeof(int) * (value->j - middle)); right_side.i = 0; right_side.j = (value->j - middle - 1); // #pragma omp parallel for for(i=middle+1, j=0 ; i <= value->j ; i++, j++) { right_side.data[j] = value->data[i]; } if (DEBUG) { printf("VETOR right_side: "); for(i=0 ; i < (value->j - middle) ; i++) { printf("%d ", right_side.data[i]); } printf("\n\n"); } mergesort((void *)&right_side); } } } else { left_side.data = (int *) malloc(sizeof(int) * (1)); left_side.data[0] = value->data[value->i]; left_side.i = value->i; left_side.j = value->j; right_side.data = (int *) malloc(sizeof(int) * (1)); right_side.data[0] = value->data[value->i]; right_side.i = value->i; right_side.j = value->j; } // Junta as duas partes i = j = 0; //#pragma omp parallel for for(k=value->i ; k<=value->j && (i<=right_side.j) && (j<=left_side.j) ; k++) { // Decide se vai usar o left ou o right if(right_side.data[i] < left_side.data[j]) { value->data[k] = right_side.data[i++]; } else { value->data[k] = left_side.data[j++]; } } // Adiciona o que sobrou no data while((i<=right_side.j)) { value->data[k++] = right_side.data[i++]; } while((j<=left_side.j)) { value->data[k++] = left_side.data[j++]; } if (DEBUG) { printf("VETOR PARCIALMENTE ORDENADO\n"); for(k=0 ; k<=value->j ; k++) { printf ("%d ", value->data[k]); } printf("\n\n\n"); } free(left_side.data); free(right_side.data); } int * generate_randon_vector(int n) { int i; int * vector = (int *) malloc(sizeof(int) * n); srand(time(NULL)); for(i=0 ; i<n ; i++) { int x = rand(); vector[i] = x%(3*n); } return vector; } int main (int argc, char *argv[]) { int i; int rc; int n; 
struct merge_data struct_data; n = 1280000; // Precisei criar um vetor de tamanho 4000000 para verificar as threads executando pelo monitor do sistema. int * data = generate_randon_vector(n); /* printf("VETOR Inicial\n"); for(i=0 ; i< n ; i++) { printf("%d ", data[i]); } printf("\n\n");*/ struct_data.data = data; struct_data.i = 0; struct_data.j = n-1; mergesort((void *)&struct_data); /* printf("\n\nVETOR ORDENADO\n"); for(i=0 ; i< n ; i++) { printf("%d ", data[i]); } printf("\n\n"); */ // free(struct_data.data); /* Last thing that main() should do */ //pthread_exit(NULL); }
pcpaesmctrcaomp.c
/******************************************************************************* * Copyright 2013-2019 Intel Corporation * All Rights Reserved. * * If this software was obtained under the Intel Simplified Software License, * the following terms apply: * * The source code, information and material ("Material") contained herein is * owned by Intel Corporation or its suppliers or licensors, and title to such * Material remains with Intel Corporation or its suppliers or licensors. The * Material contains proprietary information of Intel or its suppliers and * licensors. The Material is protected by worldwide copyright laws and treaty * provisions. No part of the Material may be used, copied, reproduced, * modified, published, uploaded, posted, transmitted, distributed or disclosed * in any way without Intel's prior express written permission. No license under * any patent, copyright or other intellectual property rights in the Material * is granted to or conferred upon you, either expressly, by implication, * inducement, estoppel or otherwise. Any license under such intellectual * property rights must be express and approved by Intel in writing. * * Unless otherwise agreed by Intel in writing, you may not remove or alter this * notice or any other notice embedded in Materials by Intel or Intel's * suppliers or licensors in any way. * * * If this software was obtained under the Apache License, Version 2.0 (the * "License"), the following terms apply: * * You may not use this file except in compliance with the License. You may * obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 * * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * limitations under the License. 
 *******************************************************************************/

/*
// Purpose:
//    Cryptography Primitive.
//    AES encryption/decryption (CTR mode)
//
// Contents:
//    ippsAESEncryptCTR()
//    ippsAESDecryptCTR()
//
*/

#include "owndefs.h"

#if defined(_OPENMP)

#include "owncp.h"
#include "pcpaesm.h"
#include "pcptool.h"
#include "omp.h"

#if (_ALG_AES_SAFE_==_ALG_AES_SAFE_COMPOSITE_GF_)
#elif (_ALG_AES_SAFE_==_ALG_AES_SAFE_COMPACT_SBOX_)
#  include "pcprijtables.h"
#else
#endif

/*
// AES-CTR processing.
//
// Returns:                Reason:
//    ippStsNullPtrErr        pCtx == NULL
//                            pSrc == NULL
//                            pDst == NULL
//                            pCtrValue == NULL
//    ippStsContextMatchErr   !VALID_AES_ID()
//    ippStsLengthErr         len < 1
//    ippStsCTRSizeErr        128 < ctrNumBitSize < 1
//    ippStsNoErr             no errors
//
// Parameters:
//    pSrc           pointer to the source data buffer
//    pDst           pointer to the target data buffer
//    dataLen        input/output buffer length (in bytes)
//    pCtx           pointer to the AES context
//    pCtrValue      pointer to the counter block
//    ctrNumBitSize  counter block size (bits)
//
// Note:
//    counter will be updated on return
//
*/

/* Encrypt/decrypt nBlocks whole 16-byte blocks in CTR mode (single thread).
   pCtrValue is read as the starting counter and written back with the
   counter advanced by nBlocks on the scalar path; on the AES-NI path the
   pipelined primitive maintains the counter itself.  Only the low
   ctrNumBitSize bits of the counter are incremented. */
static void AES_CTR_processing(const Ipp8u* pSrc, Ipp8u* pDst, int nBlocks,
                               const IppsAESSpec* pCtx,
                               Ipp8u* pCtrValue, int ctrNumBitSize)
{
#if (_IPP>=_IPP_P8) || (_IPP32E>=_IPP32E_Y8)
   /* use pipelined version if possible */
   if(AES_NI_ENABLED==RIJ_AESNI(pCtx)) {
      /* construct ctr mask: 0 bits over the fixed nonce part,
         1 bits over the low ctrNumBitSize counting part */
      Ipp8u maskIV[MBS_RIJ128];
      int n;
      int maskPosition = (MBS_RIJ128*8-ctrNumBitSize)/8;
      Ipp8u maskValue = (Ipp8u)(0xFF >> (MBS_RIJ128*8-ctrNumBitSize)%8 );
      for(n=0; n<maskPosition; n++) maskIV[n] = 0;
      maskIV[maskPosition] = maskValue;
      for(n=maskPosition+1; n<16; n++)  maskIV[n] = 0xFF;

      EncryptCTR_RIJ128pipe_AES_NI(pSrc, pDst, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), nBlocks*MBS_RIJ128, pCtrValue, (Ipp8u*)maskIV);
   }
   else
#endif
   {
      /* setup encoder method */
      RijnCipher encoder = RIJ_ENCODER(pCtx);
      Ipp32u output[NB(128)];
      /* copy counter value */
      Ipp32u ctr[NB(128)];
      CopyBlock16(pCtrValue, ctr );

      /*
      // block-by-block processing
      */
      while(nBlocks) {
         /* encrypt counter block */
         #if (_ALG_AES_SAFE_==_ALG_AES_SAFE_COMPACT_SBOX_)
         encoder((Ipp8u*)ctr, (Ipp8u*)output, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), RijEncSbox/*NULL*/);
         #else
         encoder((Ipp8u*)ctr, (Ipp8u*)output, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), NULL);
         #endif
         /* compute ciphertext block */
         XorBlock16(pSrc, output, pDst);
         /* increment counter block */
         StdIncrement((Ipp8u*)ctr,MBS_RIJ128*8, ctrNumBitSize);
         pSrc += MBS_RIJ128;
         pDst += MBS_RIJ128;
         nBlocks--;
      }
      /* copy counter back */
      CopyBlock16(ctr, pCtrValue);
   }
}

/* Shared body of ippsAESEncryptCTR/ippsAESDecryptCTR (CTR encryption and
   decryption are the same operation).  Validates arguments, splits the
   whole-block part of the stream over OpenMP threads (each thread derives
   its own counter with ompStdIncrement128 so blocks stay in sequence),
   then handles any sub-block tail with one extra keystream block. */
static IppStatus AES_ctr(const Ipp8u* pSrc, Ipp8u* pDst, int srcLen,
                         const IppsAESSpec* pCtx,
                         Ipp8u* pCtrValue, int ctrNumBitSize)
{
   /* test the pointers */
   IPP_BAD_PTR4_RET(pSrc, pDst, pCtx, pCtrValue);
   /* align the context */
   pCtx = (IppsAESSpec*)(IPP_ALIGNED_PTR(pCtx, AES_ALIGNMENT));
   /* test the context ID */
   IPP_BADARG_RET(!VALID_AES_ID( pCtx ), ippStsContextMatchErr);
   /* test the data stream length */
   IPP_BADARG_RET((srcLen<1), ippStsLengthErr);
   /* test the counter block size */
   IPP_BADARG_RET((128<ctrNumBitSize) || (ctrNumBitSize<1), ippStsCTRSizeErr);

   {
      int nBlocks = srcLen / MBS_RIJ128;
      if(nBlocks) {
         /* pick the per-thread minimum so tiny inputs stay single-threaded */
         int blk_per_thread = AES_NI_ENABLED==RIJ_AESNI(pCtx)? AESNI128_MIN_BLK_PER_THREAD : RIJ128_MIN_BLK_PER_THREAD;
         int nThreads = IPP_MIN(IPPCP_GET_NUM_THREADS(), IPP_MAX(nBlocks/blk_per_thread, 1));

         if(1==nThreads) {
            AES_CTR_processing(pSrc, pDst, nBlocks, pCtx, pCtrValue, ctrNumBitSize);
            goto ctr_tail;
         }

         else {
            int blksThreadReg;
            int blksThreadTail;

            #pragma omp parallel IPPCP_OMP_LIMIT_MAX_NUM_THREADS(nThreads)
            {
               /* master computes the per-thread split; the barrier below
                  publishes it to all threads before any of them reads it */
               #pragma omp master
               {
                  nThreads = omp_get_num_threads();
                  blksThreadReg = nBlocks / nThreads;
                  blksThreadTail = blksThreadReg + nBlocks % nThreads;
               }
               #pragma omp barrier
               {
                  int id = omp_get_thread_num();
                  Ipp8u* pThreadSrc = (Ipp8u*)pSrc + id*blksThreadReg * MBS_RIJ128;
                  Ipp8u* pThreadDst = (Ipp8u*)pDst + id*blksThreadReg * MBS_RIJ128;
                  /* last thread also takes the remainder blocks */
                  int blkThread = (id==(nThreads-1))? blksThreadTail : blksThreadReg;
                  /* compute thread counter: base counter advanced by the
                     number of blocks processed by preceding threads */
                  Ipp8u thread_counter[MBS_RIJ128];
                  ompStdIncrement128(pCtrValue, thread_counter, ctrNumBitSize, id*blksThreadReg);

                  AES_CTR_processing(pThreadSrc, pThreadDst, blkThread, pCtx, thread_counter, ctrNumBitSize);
               }
            }

            /* update counter */
            ompStdIncrement128(pCtrValue, pCtrValue, ctrNumBitSize, nBlocks);
         }
      }

ctr_tail:
      /* process the rest of data block if any */
      srcLen &= MBS_RIJ128-1;
      if(srcLen) {
         Ipp32u counter[NB(128)];
         Ipp32u output[NB(128)];

         /* setup encoder method */
         RijnCipher encoder = RIJ_ENCODER(pCtx);

         /* copy counter */
         CopyBlock16(pCtrValue, counter);

         pSrc += nBlocks*MBS_RIJ128;
         pDst += nBlocks*MBS_RIJ128;

         /* encrypt counter block */
         #if (_ALG_AES_SAFE_==_ALG_AES_SAFE_COMPACT_SBOX_)
         encoder((Ipp8u*)counter, (Ipp8u*)output, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), RijEncSbox/*NULL*/);
         #else
         encoder((Ipp8u*)counter, (Ipp8u*)output, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), NULL);
         #endif
         /* compute ciphertext block */
         XorBlock(pSrc, output, pDst, srcLen);
         /* increment counter block */
         StdIncrement((Ipp8u*)counter, MBS_RIJ128*8, ctrNumBitSize);
         /* copy counter back */
         CopyBlock16(counter, pCtrValue);
      }

      return ippStsNoErr;
   }
}

/* Public CTR-mode encryption entry point; see AES_ctr for semantics. */
IPPFUN(IppStatus, ippsAESEncryptCTR,(const Ipp8u* pSrc, Ipp8u* pDst, int dataLen,
                                     const IppsAESSpec* pCtx,
                                     Ipp8u* pCtrValue, int ctrNumBitSize ))
{
   return AES_ctr(pSrc, pDst, dataLen, pCtx, pCtrValue, ctrNumBitSize);
}

/* Public CTR-mode decryption entry point; identical to encryption in CTR. */
IPPFUN(IppStatus, ippsAESDecryptCTR,(const Ipp8u* pSrc, Ipp8u* pDst, int dataLen,
                                     const IppsAESSpec* pCtx,
                                     Ipp8u* pCtrValue, int ctrNumBitSize ))
{
   return AES_ctr(pSrc, pDst, dataLen, pCtx, pCtrValue, ctrNumBitSize);
}

#endif /* _OPENMP */
SpatialAdaptiveAveragePooling.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialAdaptiveAveragePooling.c"
#else

/* Map output index a (of b outputs) to the [start,end) input range over c
   inputs; adjacent output cells get overlapping/adjacent input windows. */
#define START_IND(a,b,c) (int)floor((float)(a * c) / b)
#define END_IND(a,b,c) (int)ceil((float)((a + 1) * c) / b)
// #define START_IND(a,b,c) a * c / b
// #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0

// 4d tensor B x D x H x W

/* Forward pass for a single frame (D x H x W): each output cell is the mean
   of its adaptive input window.  Parallelized over the D (plane) dimension. */
static void THNN_(SpatialAdaptiveAveragePooling_updateOutput_frame)(
          real *input_p,
          real *output_p,
          int64_t sizeD,
          int64_t isizeH,
          int64_t isizeW,
          int64_t osizeH,
          int64_t osizeW,
          int64_t istrideD,
          int64_t istrideH,
          int64_t istrideW)
{
  int64_t d;
#pragma omp parallel for private(d)
  for (d = 0; d < sizeD; d++)
  {
    /* loop over output */
    int64_t oh, ow;
    for(oh = 0; oh < osizeH; oh++)
    {
      int istartH = START_IND(oh, osizeH, isizeH);
      int iendH   = END_IND(oh, osizeH, isizeH);
      int kH = iendH - istartH;

      for(ow = 0; ow < osizeW; ow++)
      {
        int istartW = START_IND(ow, osizeW, isizeW);
        int iendW   = END_IND(ow, osizeW, isizeW);
        int kW = iendW - istartW;

        /* local pointers: input uses explicit strides (input need not be
           contiguous), output is contiguous */
        real *ip = input_p   + d*istrideD + istartH*istrideH + istartW*istrideW;
        real *op = output_p  + d*osizeH*osizeW + oh*osizeW + ow;

        /* compute local average: */
        real sum = 0;
        int ih, iw;
        for(ih = 0; ih < kH; ih++)
        {
          for(iw = 0; iw < kW; iw++)
          {
            real val = *(ip + ih*istrideH + iw*istrideW);
            sum += val;
          }
        }

        /* set output to local average */
        *op = sum / kW / kH;
      }
    }
  }
}

/* Forward entry point: validates the input (3D frame or 4D batch), resizes
   the output to (B x) D x osizeH x osizeW and dispatches per-frame work.
   For 4D input, frames are additionally parallelized over the batch. */
void THNN_(SpatialAdaptiveAveragePooling_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          int osizeW,
          int osizeH)
{
  int dimD = 0;
  int dimH = 1;
  int dimW = 2;
  int64_t sizeB = 1;
  int64_t sizeD;
  int64_t isizeH;
  int64_t isizeW;

  int64_t istrideB;
  int64_t istrideD;
  int64_t istrideH;
  int64_t istrideW;

  real *input_data;
  real *output_data;

  THNN_ARGCHECK(input->nDimension == 3 || input->nDimension == 4, 2, input,
      "3D or 4D (batch mode) tensor expected for input, but got: %s");

  if (input->nDimension == 4)
  {
    /* batch mode: shift the D/H/W dimension indices past the batch dim */
    istrideB = input->stride[0];
    sizeB = input->size[0];
    dimD++;
    dimH++;
    dimW++;
  }

  /* sizes */
  sizeD  = input->size[dimD];
  isizeH = input->size[dimH];
  isizeW = input->size[dimW];
  /* strides */
  istrideD = input->stride[dimD];
  istrideH = input->stride[dimH];
  istrideW = input->stride[dimW];

  /* resize output */
  if (input->nDimension == 3)
  {
    THTensor_(resize3d)(output, sizeD, osizeH, osizeW);

    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);

    THNN_(SpatialAdaptiveAveragePooling_updateOutput_frame)(input_data, output_data,
                                                      sizeD,
                                                      isizeH, isizeW,
                                                      osizeH, osizeW,
                                                      istrideD,
                                                      istrideH, istrideW);
  }
  else
  {
    int64_t b;

    THTensor_(resize4d)(output, sizeB, sizeD, osizeH, osizeW);

    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);

#pragma omp parallel for private(b)
    for (b = 0; b < sizeB; b++)
    {
      THNN_(SpatialAdaptiveAveragePooling_updateOutput_frame)(input_data+b*istrideB, output_data+b*sizeD*osizeH*osizeW,
                                                        sizeD,
                                                        isizeH, isizeW,
                                                        osizeH, osizeW,
                                                        istrideD,
                                                        istrideH, istrideW);
    }
  }
}

/* Backward pass for a single frame: each output gradient is distributed
   uniformly (divided by the window area) over its input window.
   Both tensors are assumed contiguous here (gradInput was resizeAs'd from
   input and zeroed; gradOutput was made contiguous by the caller). */
static void THNN_(SpatialAdaptiveAveragePooling_updateGradInput_frame)(
          real *gradInput_p,
          real *gradOutput_p,
          int64_t sizeD,
          int64_t isizeH,
          int64_t isizeW,
          int64_t osizeH,
          int64_t osizeW)
{
  int64_t d;
#pragma omp parallel for private(d)
  for (d = 0; d < sizeD; d++)
  {
    real *gradInput_p_d = gradInput_p + d*isizeW*isizeH;
    real *gradOutput_p_d = gradOutput_p + d*osizeW*osizeH;

    /* calculate average */
    int64_t oh, ow;
    for(oh = 0; oh < osizeH; oh++)
    {
      int istartH = START_IND(oh, osizeH, isizeH);
      int iendH   = END_IND(oh, osizeH, isizeH);
      int kH = iendH - istartH;

      for(ow = 0; ow < osizeW; ow++)
      {
        int istartW = START_IND(ow, osizeW, isizeW);
        int iendW   = END_IND(ow, osizeW, isizeW);
        int kW = iendW - istartW;

        real grad_delta = gradOutput_p_d[oh*osizeW +ow] / kH / kW;

        int ih, iw;
        for(ih = istartH; ih < iendH; ih++)
        {
          for(iw = istartW; iw < iendW; iw++)
          {
            /* update gradient; windows of neighbouring output cells may
               overlap, so accumulate with += */
            gradInput_p_d[ih*isizeW + iw] += grad_delta;
          }
        }
      }
    }
  }
}

/* Backward entry point: zero-fills gradInput (same shape as input) and
   accumulates the distributed gradients frame by frame.
   NOTE(review): the size locals below are plain int, unlike the int64_t
   used in updateOutput — fine for realistic tensor sizes but inconsistent. */
void THNN_(SpatialAdaptiveAveragePooling_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput)
{
  int dimD = 0;
  int dimH = 1;
  int dimW = 2;
  int64_t sizeB = 1;
  int sizeD;
  int isizeH;
  int isizeW;
  int osizeH;
  int osizeW;
  real *gradInput_data;
  real *gradOutput_data;

  /* get contiguous gradOutput */
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* resize */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  if (input->nDimension == 4) {
    sizeB = input->size[0];
    dimD++;
    dimH++;
    dimW++;
  }

  /* sizes */
  sizeD  = input->size[dimD];
  isizeH = input->size[dimH];
  isizeW = input->size[dimW];
  osizeH = gradOutput->size[dimH];
  osizeW = gradOutput->size[dimW];

  /* get raw pointers */
  gradInput_data = THTensor_(data)(gradInput);
  gradOutput_data = THTensor_(data)(gradOutput);

  /* backprop */
  if (input->nDimension == 3)
  {
    THNN_(SpatialAdaptiveAveragePooling_updateGradInput_frame)(gradInput_data, gradOutput_data,
                                                         sizeD,
                                                         isizeH, isizeW,
                                                         osizeH, osizeW);
  }
  else
  {
    int64_t b;
#pragma omp parallel for private(b)
    for (b = 0; b < sizeB; b++)
    {
      THNN_(SpatialAdaptiveAveragePooling_updateGradInput_frame)(gradInput_data+b*sizeD*isizeH*isizeW, gradOutput_data+b*sizeD*osizeH*osizeW,
                                                           sizeD,
                                                           isizeH, isizeW,
                                                           osizeH, osizeW);
    }
  }

  /* cleanup */
  THTensor_(free)(gradOutput);
}

#endif

#undef START_IND
#undef END_IND
sort.h
/* * (C) Copyright 2013 ECMWF. * * This software is licensed under the terms of the Apache Licence Version 2.0 * which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. * In applying this licence, ECMWF does not waive the privileges and immunities * granted to it by virtue of its status as an intergovernmental organisation * nor does it submit to any jurisdiction. */ #pragma once #include <algorithm> #include <functional> #include <iterator> #include "atlas/library/config.h" #include "atlas/parallel/omp/omp.h" #if ATLAS_HAVE_OMP #include <omp.h> #define ATLAS_HAVE_OMP_SORTING 1 #else #define ATLAS_HAVE_OMP_SORTING 0 #endif // Bug in Cray 8.5 or below results in segmentation fault in atlas_test_omp_sort #if ATLAS_HAVE_OMP_SORTING && defined(_CRAYC) #if _RELEASE <= 8 && _RELEASE_MINOR < 6 #undef ATLAS_HAVE_OMP_SORTING #define ATLAS_HAVE_OMP_SORTING 0 #endif #endif namespace atlas { namespace omp { /** * sort * ==== * * 1) template <typename RandomAccessIterator> * void sort ( RandomAccessIterator first, RandomAccessIterator last ); * * 2) template <typename RandomAccessIterator, typename Compare> * void sort ( RandomAccessIterator first, RandomAccessIterator last, Compare comp ); * * Sort elements in range * Sorts the elements in the range [first,last) into ascending order. * * The elements are compared using operator< for the first version, and comp for the second. * * Equivalent elements are not guaranteed to keep their original relative order (see stable_sort). * * Parameters * ---------- * first, last * Random-access iterators to the initial and final positions of the sequence to be sorted. The range used is [first,last), * which contains all the elements between first and last, including the element pointed by first but not the element pointed by last. * RandomAccessIterator shall point to a type for which swap is properly defined and which is both move-constructible and move-assignable. 
 * comp
 *      Binary function that accepts two elements in the range as arguments, and returns a value convertible to bool.
 *      The value returned indicates whether the element passed as first argument is considered to go before the second in the specific strict weak ordering it defines.
 *      The function shall not modify any of its arguments.
 *      This can either be a function pointer or a function object.
 *
 *
 *
 * merge_blocks
 * ============
 *
 * 1) template<typename RandomAccessIterator, typename RandomAccessIterator2>
 *    void merge_blocks( RandomAccessIterator first, RandomAccessIterator last,
 *                       RandomAccessIterator2 blocks_size_first, RandomAccessIterator2 blocks_size_last );
 *
 *
 * 2) template<typename RandomAccessIterator, typename RandomAccessIterator2, typename Compare >
 *    void merge_blocks( RandomAccessIterator first, RandomAccessIterator last,
 *                       RandomAccessIterator2 blocks_size_first, RandomAccessIterator2 blocks_size_last,
 *                       Compare compare );
 *
 * Sort elements in range [first + *blocks_size_first, first + *blocks_size_last) using a merge sort
 * where each block in range [blocks_size_first,blocks_size_last) is already sorted.
 *
 * Parameters
 * ----------
 *  first, last
 *      Random-access iterators for bounding the sequence to be sorted
 *  blocks_begin, blocks_end
 *      Random-access iterators that define offsets from parameter "first" of blocks that are already sorted
 */

namespace detail {

#if ATLAS_HAVE_OMP_SORTING
// Task-parallel top-down merge sort over [iterator+begin, iterator+end).
// Ranges of fewer than 256 elements are sorted directly with std::sort;
// larger ranges fork two OpenMP tasks and merge the halves in place.
// Must be entered from inside an omp parallel/single region (see sort()).
template <typename RandomAccessIterator, typename Compare>
void merge_sort_recursive(const RandomAccessIterator& iterator, size_t begin, size_t end, Compare compare) {
    auto size = end - begin;
    if (size >= 256) {
        auto mid = begin + size / 2;
        //#pragma omp taskgroup // --> it would be preferred to use taskgroup and taskyield instead of taskwait,
        //                            but this leads to segfaults on Cray (cce/8.5.8)
        {
#if ATLAS_OMP_TASK_UNTIED_SUPPORTED
#pragma omp task shared(iterator) untied if (size >= (1 << 15))
#else
#pragma omp task shared(iterator)
#endif
            merge_sort_recursive(iterator, begin, mid, compare);
#if ATLAS_OMP_TASK_UNTIED_SUPPORTED
#pragma omp task shared(iterator) untied if (size >= (1 << 15))
#else
#pragma omp task shared(iterator)
#endif
            merge_sort_recursive(iterator, mid, end, compare);
            //#pragma omp taskyield
#pragma omp taskwait
        }
        std::inplace_merge(iterator + begin, iterator + mid, iterator + end, compare);
    }
    else {
        std::sort(iterator + begin, iterator + end, compare);
    }
}
#endif

#if ATLAS_HAVE_OMP_SORTING
// Task-parallel pairwise merge of pre-sorted blocks; blocks[i] holds the
// offset (from "iterator") of block i, with blocks[blocks_end] the past-the-
// end offset.  Recursively merges the two halves of the block list, then
// merges the results in place.  Must run inside a parallel/single region.
template <typename RandomAccessIterator, typename Indexable, typename Compare>
void merge_blocks_recursive(const RandomAccessIterator& iterator, const Indexable& blocks, size_t blocks_begin,
                            size_t blocks_end, Compare compare) {
    if (blocks_end <= blocks_begin + 1) {
        // recursion done, go back out
        return;
    }
    size_t blocks_mid = (blocks_begin + blocks_end) / 2;
    //#pragma omp taskgroup // --> it would be preferred to use taskgroup and taskyield instead of taskwait,
    //                            but this leads to segfaults on Cray (cce/8.5.8)
    {
#pragma omp task shared(iterator, blocks)
        merge_blocks_recursive(iterator, blocks, blocks_begin, blocks_mid, compare);
#pragma omp task shared(iterator, blocks)
        merge_blocks_recursive(iterator, blocks, blocks_mid, blocks_end, compare);
        //#pragma omp taskyield
#pragma omp taskwait
    }
    auto begin = iterator + blocks[blocks_begin];
    auto mid   = iterator + blocks[blocks_mid];
    auto end   = iterator + blocks[blocks_end];
    std::inplace_merge(begin, mid, end, compare);
}
#endif

// Sequential fallback of merge_blocks_recursive (same divide-and-merge
// structure, no OpenMP tasks).  Used when OpenMP sorting is unavailable
// or only one thread is active.
template <typename RandomAccessIterator, typename Indexable, typename Compare>
void merge_blocks_recursive_seq(RandomAccessIterator& iterator, const Indexable& blocks, size_t blocks_begin,
                                size_t blocks_end, Compare compare) {
    if (blocks_end <= blocks_begin + 1) {
        // recursion done, go back out
        return;
    }
    size_t blocks_mid = (blocks_begin + blocks_end) / 2;
    {
        merge_blocks_recursive_seq(iterator, blocks, blocks_begin, blocks_mid, compare);
        merge_blocks_recursive_seq(iterator, blocks, blocks_mid, blocks_end, compare);
    }
    auto begin = iterator + blocks[blocks_begin];
    auto mid   = iterator + blocks[blocks_mid];
    auto end   = iterator + blocks[blocks_end];
    std::inplace_merge(begin, mid, end, compare);
}

}  // namespace detail

// Sort [first,last) by comp; runs the task-parallel merge sort when OpenMP
// sorting is enabled and more than one thread is available, otherwise
// defers to std::sort.  Not stable.
template <typename RandomAccessIterator, typename Compare>
void sort(RandomAccessIterator first, RandomAccessIterator last, Compare compare) {
#if ATLAS_HAVE_OMP_SORTING
    if (atlas_omp_get_max_threads() > 1) {
#pragma omp parallel
#pragma omp single
        detail::merge_sort_recursive(first, 0, std::distance(first, last), compare);
    }
    else {
        std::sort(first, last, compare);
    }
#else
    std::sort(first, last, compare);
#endif
}

// Sort [first,last) into ascending order with operator< .
template <typename RandomAccessIterator>
void sort(RandomAccessIterator first, RandomAccessIterator last) {
    using value_type = typename std::iterator_traits<RandomAccessIterator>::value_type;
    ::atlas::omp::sort(first, last, std::less<value_type>());
}

// Merge consecutive pre-sorted blocks (given by their sizes in
// [blocks_size_first,blocks_size_last)) into one sorted range, using comp.
// Block sizes are first converted to displacements from "first".
// NOTE(review): std::vector is used here but <vector> is not included at
// the top of this header — it currently relies on a transitive include;
// consider adding #include <vector> there.
template <typename RandomAccessIterator, typename RandomAccessIterator2, typename Compare>
void merge_blocks(RandomAccessIterator first, RandomAccessIterator last, RandomAccessIterator2 blocks_size_first,
                  RandomAccessIterator2 blocks_size_last, Compare compare) {
    using size_type     = typename std::iterator_traits<RandomAccessIterator2>::value_type;
    size_type nb_blocks = std::distance(blocks_size_first, blocks_size_last);
    // prefix-sum the block sizes into displacements (blocks_displs[i] is the
    // offset of block i; the last entry is the total length)
    std::vector<size_type> blocks_displs(nb_blocks + 1);
    blocks_displs[0] = 0;
    for (size_t i = 1; i < blocks_displs.size(); ++i) {
        blocks_displs[i] = blocks_displs[i - 1] + blocks_size_first[i - 1];
    }
#if ATLAS_HAVE_OMP_SORTING
    if (atlas_omp_get_max_threads() > 1) {
#pragma omp parallel
#pragma omp single
        detail::merge_blocks_recursive(first, blocks_displs, 0, nb_blocks, compare);
    }
    else {
        detail::merge_blocks_recursive_seq(first, blocks_displs, 0, nb_blocks, compare);
    }
#else
    detail::merge_blocks_recursive_seq(first, blocks_displs, 0, nb_blocks, compare);
#endif
}

// merge_blocks with the default ascending (operator<) ordering.
template <typename RandomAccessIterator, typename RandomAccessIterator2>
void merge_blocks(RandomAccessIterator first, RandomAccessIterator last, RandomAccessIterator2 blocks_size_first,
                  RandomAccessIterator2 blocks_size_last) {
    using value_type = typename std::iterator_traits<RandomAccessIterator>::value_type;
    ::atlas::omp::merge_blocks(first, last, blocks_size_first, blocks_size_last, std::less<value_type>());
}

}  // namespace omp
}  // namespace atlas

#undef ATLAS_OMP_TASK_UNTIED_SUPPORTED
#undef ATLAS_HAVE_OMP_SORTING
main.c
#include <stdint.h> #include "omp.h" #include "common.h" //NOTE Make your configuration here #define NUM_THREADS 8U #define ITERS 3U #define CHECK // #define VERBOSE #include "data/input.h" static inline int32_t checkResults(int32_t *output, uint32_t nbNodes) { uint32_t checksum = 0; int32_t ret = 1U; //1 == fail uint32_t i = 0, j = 0; #ifdef VERBOSE printf("[bfs] Checksumming...\n"); #endif for(i = 0 ; i < nbNodes ; i++) { #ifdef OUTPUT printf("%x \n", output[i]); #endif checksum += output[i]; } #ifdef VERBOSE printf("[bfs] Checksum 0x%x\n", (int)checksum); #endif if(CHECKSUM) { if(CHECKSUM == checksum) { #ifdef VERBOSE printf("[bfs] Check...[" ANSI_COLOR_GREEN "SUCCESS" ANSI_COLOR_RESET "]\n"); #endif ret = 0; } else { printf("[bfs] Check...[" ANSI_COLOR_RED "FAIL" ANSI_COLOR_RESET "]\n"); printf("[bfs] Checksum 0x%x\n", (int) checksum); ret = 1; } } return ret; } //////////////////////////////////////////////////////////////////////////////// //Apply BFS on a Graph using CUDA //////////////////////////////////////////////////////////////////////////////// int main() { uint32_t iter; int32_t ret = -1; //1 == fail // allocate host memory uint8_t *h_graph_mask = (uint8_t*) l1malloc(sizeof(uint8_t)*no_of_nodes); uint8_t *h_updating_graph_mask = (uint8_t*) l1malloc(sizeof(uint8_t)*no_of_nodes); uint8_t *h_graph_visited = (uint8_t*) l1malloc(sizeof(uint8_t)*no_of_nodes); // allocate mem for the result on host side int32_t* h_cost = (int32_t*)l1malloc( sizeof(int32_t)*no_of_nodes); int start, edgeno, i; for(iter = 0; iter < ITERS; ++iter) { // initalize the memory for(i = 0; i < no_of_nodes; i++) { h_graph_mask[i]=false; h_updating_graph_mask[i]=false; h_graph_visited[i]=false; } //set the source node as true in the mask h_graph_mask[source]=true; h_graph_visited[source]=true; for(i=0;i<no_of_nodes;i++) h_cost[i]=-1; h_cost[source]=0; #ifdef VERBOSE printf("[bfs] start traversing the tree size=%d...\n", DIM); #endif profile_start(iter); int k=0; uint8_t stop; do { int 
tid; //if no thread changes this value then the loop stops stop=false; #pragma omp parallel for private(tid) private(i) for(tid = 0; tid < no_of_nodes; tid++ ) { if (h_graph_mask[tid] == true){ h_graph_mask[tid]=false; for(i=h_graph_nodes[tid].starting; i<(h_graph_nodes[tid].no_of_edges + h_graph_nodes[tid].starting); i++) { int id = h_graph_edges[i]; if(!h_graph_visited[id]) { h_cost[id]=h_cost[tid]+1; h_updating_graph_mask[id]=true; } } } } for(tid=0; tid< no_of_nodes ; tid++ ) { if (h_updating_graph_mask[tid] == true){ h_graph_mask[tid]=true; h_graph_visited[tid]=true; stop=true; h_updating_graph_mask[tid]=false; } } k++; } while(stop); #ifdef VERBOSE printf("[bfs] start traversing done!\n"); #endif profile_stop(iter); #ifdef CHECK ret = checkResults(h_cost, no_of_nodes); if(ret) break; #endif } profile_show(); // cleanup memory l1free( h_graph_mask); l1free( h_updating_graph_mask); l1free( h_graph_visited); l1free( h_cost); return ret; }
GB_unop__identity_fc32_uint16.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// (Review note: only comments were touched below, per the notice above.)

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_fc32_uint16
// op(A') function:  GB_unop_tran__identity_fc32_uint16

// C type:   GxB_FC32_t
// A type:   uint16_t
// cast:     GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop:  cij = aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting (uint16 -> single-precision complex, imaginary part zero)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    uint16_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cast each of the anz entries of Ax (uint16) to FC32 and store in Cx,
// parallelized statically over nthreads OpenMP threads.
GrB_Info GB_unop_apply__identity_fc32_uint16
(
    GxB_FC32_t *Cx,         // Cx and Ax may be aliased
    const uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint16_t aij = Ax [p] ;
        GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual work is in GB_unop_transpose.c, driven by the GB_* macros above.
GrB_Info GB_unop_tran__identity_fc32_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__log1p_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// (Review note: only comments were touched below, per the notice above.)

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__log1p_fp64_fp64)
// op(A') function:  GB (_unop_tran__log1p_fp64_fp64)

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = log1p (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = log1p (x) ;

// casting (identity cast: both types are double)
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    double aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ;                \
    Cx [pC] = log1p (z) ;           \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOG1P || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Compute Cx [p] = log1p (Ax [p]) for all anz entries, parallelized
// statically over nthreads.  In the bitmap case only entries with
// Ab [p] != 0 are present; the rest are skipped.
GrB_Info GB (_unop_apply__log1p_fp64_fp64)
(
    double *Cx,             // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every slot of Ax holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = log1p (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = log1p (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual work is in GB_unop_transpose.c, driven by the GB_* macros above.
GrB_Info GB (_unop_tran__log1p_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
omp-low.c
/* Lowering pass for OMP directives. Converts OMP directives into explicit calls to the runtime library (libgomp), data marshalling to implement data sharing and copying clauses, offloading to accelerators, and more. Contributed by Diego Novillo <dnovillo@redhat.com> Copyright (C) 2005-2015 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "hash-set.h" #include "machmode.h" #include "vec.h" #include "double-int.h" #include "input.h" #include "alias.h" #include "symtab.h" #include "wide-int.h" #include "inchash.h" #include "tree.h" #include "fold-const.h" #include "stringpool.h" #include "stor-layout.h" #include "rtl.h" #include "predict.h" #include "hard-reg-set.h" #include "function.h" #include "dominance.h" #include "cfg.h" #include "cfganal.h" #include "basic-block.h" #include "tree-ssa-alias.h" #include "internal-fn.h" #include "gimple-fold.h" #include "gimple-expr.h" #include "is-a.h" #include "gimple.h" #include "gimplify.h" #include "gimple-iterator.h" #include "gimplify-me.h" #include "gimple-walk.h" #include "tree-iterator.h" #include "tree-inline.h" #include "langhooks.h" #include "diagnostic-core.h" #include "gimple-ssa.h" #include "hash-map.h" #include "plugin-api.h" #include "ipa-ref.h" #include "cgraph.h" #include "tree-cfg.h" #include "tree-phinodes.h" #include 
"ssa-iterators.h" #include "tree-ssanames.h" #include "tree-into-ssa.h" #include "hashtab.h" #include "flags.h" #include "statistics.h" #include "real.h" #include "fixed-value.h" #include "insn-config.h" #include "expmed.h" #include "dojump.h" #include "explow.h" #include "calls.h" #include "emit-rtl.h" #include "varasm.h" #include "stmt.h" #include "expr.h" #include "tree-dfa.h" #include "tree-ssa.h" #include "tree-pass.h" #include "except.h" #include "splay-tree.h" #include "insn-codes.h" #include "optabs.h" #include "cfgloop.h" #include "target.h" #include "common/common-target.h" #include "omp-low.h" #include "gimple-low.h" #include "tree-cfgcleanup.h" #include "pretty-print.h" #include "alloc-pool.h" #include "symbol-summary.h" #include "ipa-prop.h" #include "tree-nested.h" #include "tree-eh.h" #include "cilk.h" #include "context.h" #include "lto-section-names.h" #include "gomp-constants.h" /* Lowering of OMP parallel and workshare constructs proceeds in two phases. The first phase scans the function looking for OMP statements and then for variables that must be replaced to satisfy data sharing clauses. The second phase expands code for the constructs, as well as re-gimplifying things when variables have been replaced with complex expressions. Final code generation is done by pass_expand_omp. The flowgraph is scanned for regions which are then moved to a new function, to be invoked by the thread library, or offloaded. */ /* OMP region information. Every parallel and workshare directive is enclosed between two markers, the OMP_* directive and a corresponding OMP_RETURN statement. */ struct omp_region { /* The enclosing region. */ struct omp_region *outer; /* First child region. */ struct omp_region *inner; /* Next peer region. */ struct omp_region *next; /* Block containing the omp directive as its last stmt. */ basic_block entry; /* Block containing the OMP_RETURN as its last stmt. */ basic_block exit; /* Block containing the OMP_CONTINUE as its last stmt. 
*/
  basic_block cont;

  /* If this is a combined parallel+workshare region, this is a list
     of additional arguments needed by the combined parallel+workshare
     library call.  */
  vec<tree, va_gc> *ws_args;

  /* The code for the omp directive of this region.  */
  enum gimple_code type;

  /* Schedule kind, only used for OMP_FOR type regions.  */
  enum omp_clause_schedule_kind sched_kind;

  /* True if this is a combined parallel+workshare region.  */
  bool is_combined_parallel;
};

/* Levels of parallelism as defined by OpenACC.  Increasing numbers
   correspond to deeper loop nesting levels.  */
#define MASK_GANG 1
#define MASK_WORKER 2
#define MASK_VECTOR 4

/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  /* The OMP directive statement this context was created for.  */
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by
     task firstprivate fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* A map of reduction pointer variables.  For accelerators, each
     reduction variable is replaced with an array.  Each thread, in turn,
     is assigned to a slot on that array.  */
  splay_tree reduction_map;

  /* Label to which GOMP_cancel{,llation_point} and explicit and implicit
     barriers should jump to during omplower pass.  */
  tree cancel_label;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;

  /* True if this construct can be cancelled.  */
  bool cancellable;

  /* For OpenACC loops, a mask of gang, worker and vector used at
     levels below this one.  */
  int gwv_below;

  /* For OpenACC loops, a mask of gang, worker and vector used at
     this level and above.  For parallel and kernels clauses, a mask
     indicating which of num_gangs/num_workers/num_vectors was used.  */
  int gwv_this;
} omp_context;

/* A structure holding the elements of:
   for (V = N1; V cond N2; V += STEP) [...]  */

struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gomp_for *for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};

/* Pass-global state: the context map, current nesting levels, the root
   of the region tree, and bookkeeping shared across the pass.  */
static splay_tree all_contexts;
static int taskreg_nesting_level;
static int target_nesting_level;
static struct omp_region *root_omp_region;
static bitmap task_shared_vars;
static vec<omp_context *> taskreg_contexts;

static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

/* Common case labels for gimple walkers: containers whose
   sub-statements should be descended into.  */
#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;

/* Helper function to get the name of the array containing the partial
   reductions for OpenACC reductions.  The name is formed by prefixing
   the variable's identifier with "OACC".  */
static const char *
oacc_get_reduction_array_id (tree node)
{
  const char *id = IDENTIFIER_POINTER (DECL_NAME (node));
  int len = strlen ("OACC") + strlen (id);
  char *temp_name = XALLOCAVEC (char, len + 1);
  snprintf (temp_name, len + 1, "OACC%s", id);
  return IDENTIFIER_POINTER (get_identifier (temp_name));
}

/* Determine the number of threads OpenACC threads used to determine the
   size of the array of partial reductions.  Currently, this is
   num_gangs * vector_length.  This value may be different than
   GOACC_GET_NUM_THREADS, because it is independent of the device used.  */
static tree
oacc_max_threads (omp_context *ctx)
{
  tree nthreads, vector_length, gangs, clauses;

  /* Defaults if no enclosing OACC_PARALLEL target supplies clauses.  */
  gangs = fold_convert (sizetype, integer_one_node);
  vector_length = gangs;

  /* The reduction clause may be nested inside a loop directive.
     Scan for the innermost vector_length clause.  */
  for (omp_context *oc = ctx; oc; oc = oc->outer)
    {
      if (gimple_code (oc->stmt) != GIMPLE_OMP_TARGET
	  || (gimple_omp_target_kind (oc->stmt)
	      != GF_OMP_TARGET_KIND_OACC_PARALLEL))
	continue;

      clauses = gimple_omp_target_clauses (oc->stmt);

      vector_length = find_omp_clause (clauses, OMP_CLAUSE_VECTOR_LENGTH);
      if (vector_length)
	vector_length = fold_convert_loc (OMP_CLAUSE_LOCATION (vector_length),
					  sizetype,
					  OMP_CLAUSE_VECTOR_LENGTH_EXPR
					  (vector_length));
      else
	vector_length = fold_convert (sizetype, integer_one_node);

      gangs = find_omp_clause (clauses, OMP_CLAUSE_NUM_GANGS);
      if (gangs)
	gangs = fold_convert_loc (OMP_CLAUSE_LOCATION (gangs), sizetype,
				  OMP_CLAUSE_NUM_GANGS_EXPR (gangs));
      else
	gangs = fold_convert (sizetype, integer_one_node);

      /* Only the innermost enclosing OACC_PARALLEL counts.  */
      break;
    }

  nthreads = fold_build2 (MULT_EXPR, sizetype, gangs, vector_length);

  return nthreads;
}

/* Holds offload tables with decls.  */
vec<tree, va_gc> *offload_funcs, *offload_vars;

/* Convenience function for calling scan_omp_1_op on tree operands.
*/

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  /* Request expression locations so diagnostics point at the source.  */
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OMP clause of type KIND within CLAUSES.  Returns NULL_TREE
   if no such clause is present.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}

/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}

/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}

/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}

/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.
*/

static void
extract_omp_for_data (gomp_for *for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);
  bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD;
  bool distribute = gimple_omp_for_kind (for_stmt)
		    == GF_OMP_FOR_KIND_DISTRIBUTE;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  /* With collapse > 1 the caller supplies the per-dimension loop array;
     otherwise the single embedded loop is used.  */
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = distribute || simd;
  fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_CILKFOR;
  collapse_iter = NULL;
  collapse_count = NULL;

  /* Harvest the clauses that affect scheduling and collapsing.  */
  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	gcc_assert (!distribute);
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_DIST_SCHEDULE:
	gcc_assert (distribute);
	fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
	break;
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      /* Canonicalize LE/GE conditions into LT/GT by adjusting the bound
	 by one; NE is only valid for Cilk loops.  */
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case NE_EXPR:
	  gcc_assert (gimple_omp_for_kind (for_stmt)
		      == GF_OMP_FOR_KIND_CILKSIMD
		      || (gimple_omp_for_kind (for_stmt)
			  == GF_OMP_FOR_KIND_CILKFOR));
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
	  else
	    loop->n2 = fold_build2_loc (loc,
				PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
				build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
	  else
	    loop->n2 = fold_build2_loc (loc,
				MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
				build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      /* Normalize the increment into a (possibly negated) step.  */
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case POINTER_PLUS_EXPR:
	  loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc, NEGATE_EXPR,
					TREE_TYPE (loop->step), loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      /* Choose an iteration type wide enough to hold the trip count
	 without overflow.  */
      if (simd
	  || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
	      && !fd->have_ordered))
	{
	  if (fd->collapse == 1)
	    iter_type = TREE_TYPE (loop->v);
	  else if (i == 0
		   || TYPE_PRECISION (iter_type)
		      < TYPE_PRECISION (TREE_TYPE (loop->v)))
	    iter_type
	      = build_nonstandard_integer_type
		  (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
	}
      else if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				 PLUS_EXPR, TREE_TYPE (loop->v),
				 loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
				    PLUS_EXPR, TREE_TYPE (loop->v),
				    loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
				    MINUS_EXPR, TREE_TYPE (loop->v),
				    loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      /* For collapsed loops, try to fold the overall trip count to a
	 constant; any non-constant piece forces a runtime count.  */
      if (collapse_count && *collapse_count == NULL)
	{
	  t = fold_binary (loop->cond_code, boolean_type_node,
			   fold_convert (TREE_TYPE (loop->v), loop->n1),
			   fold_convert (TREE_TYPE (loop->v), loop->n2));
	  if (t && integer_zerop (t))
	    count = build_zero_cst (long_long_unsigned_type_node);
	  else if ((i == 0 || count != NULL_TREE)
		   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		   && TREE_CONSTANT (loop->n1)
		   && TREE_CONSTANT (loop->n2)
		   && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype = signed_type_for (itype);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
			       PLUS_EXPR, itype,
			       fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
			       fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
			       fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				 fold_build1_loc (loc, NEGATE_EXPR, itype,
					      fold_convert_loc (loc, itype,
								loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				 fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
				     MULT_EXPR, long_long_unsigned_type_node,
				     count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else if (count && !integer_zerop (count))
	    count = NULL_TREE;
	}
    }

  if (count
      && !simd
      && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered))
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  /* Collapsed loops iterate over a single synthetic induction variable
     running from 0 to the combined trip count.  */
  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }

  /* For OpenACC loops, force a chunk size of one, as this avoids the default
     scheduling where several subsequent iterations are being executed by the
     same thread.  */
  if (gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
    {
      gcc_assert (fd->chunk_size == NULL_TREE);
      fd->chunk_size = build_int_cst (TREE_TYPE (fd->loop.v), 1);
    }
}

/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  /* Sections regions carry no loop header, hence nothing to check.  */
  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (as_a <gomp_for *> (ws_stmt), &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}

/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.
*/

static vec<tree, va_gc> *
get_ws_args_for (gimple par_stmt, gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gomp_for *for_stmt = dyn_cast <gomp_for *> (ws_stmt))
    {
      struct omp_for_data fd;
      tree n1, n2;

      extract_omp_for_data (for_stmt, &fd, NULL);
      n1 = fd.loop.n1;
      n2 = fd.loop.n2;

      /* When the loop was combined into the parallel, its bounds live
	 in _looptemp_ clauses on the parallel directive instead.  */
      if (gimple_omp_for_combined_into_p (for_stmt))
	{
	  tree innerc
	    = find_omp_clause (gimple_omp_parallel_clauses (par_stmt),
			       OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n1 = OMP_CLAUSE_DECL (innerc);
	  innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n2 = OMP_CLAUSE_DECL (innerc);
	}

      /* Library call takes N1, N2, STEP and, optionally, CHUNK_SIZE.  */
      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  ws_args->quick_push (t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}

/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple par_stmt = last_stmt (par_entry_bb);
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
    }
}

/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.
*/

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Return the type of a decl.  If the decl is reference type,
   return its base type.  */
static inline tree
get_base_type (tree decl)
{
  tree type = TREE_TYPE (decl);
  if (is_reference (decl))
    type = TREE_TYPE (type);
  return type;
}

/* Lookup variables.  The "maybe" form allows for the variable form to not
   have been entered, otherwise we assert that the variable must have been
   entered.  */

/* Remapped decl for VAR in CTX; VAR must be present in the map.  */
static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (var);
  return *n;
}

/* Remapped decl for VAR in CTX, or NULL_TREE if not mapped.  */
static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (const_cast<tree> (var));
  return n ? *n : NULL_TREE;
}

/* Receiver-side field for VAR; VAR must be present in the map.  */
static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

/* Sender-side field for VAR, falling back to field_map when the task
   sender record was not needed.  */
static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

/* Receiver-side field for VAR, or NULL_TREE if not mapped.  */
static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* OpenACC partial-reduction array for the mangled name ID; must exist.  */
static inline tree
lookup_oacc_reduction (const char *id, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->reduction_map, (splay_tree_key) id);
  return (tree) n->value;
}

/* OpenACC partial-reduction array for VAR, or NULL_TREE.  */
static inline tree
maybe_lookup_oacc_reduction (tree var, omp_context *ctx)
{
  splay_tree_node n = NULL;
  if (ctx->reduction_map)
    n = splay_tree_lookup (ctx->reduction_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.
*/

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  /* Aggregates are always communicated by address.  */
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      gcc_assert (!is_gimple_omp_oacc (shared_ctx->stmt));

      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* lower_send_shared_vars only uses copy-in, but not copy-out
	 for these.  */
      if (TREE_READONLY (decl)
	  || ((TREE_CODE (decl) == RESULT_DECL
	       || TREE_CODE (decl) == PARM_DECL)
	      && DECL_BY_REFERENCE (decl)))
	return false;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}

      /* For tasks avoid using copy-in/out.  As tasks can be
	 deferred or executed in different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}

/* Construct a new automatic decl similar to VAR, with NAME and TYPE,
   and chain it onto CTX's block_vars.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

/* Like omp_copy_decl_2, keeping VAR's own name and type.  */

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */
static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ret) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ret) |= 1;
  return ret;
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = omp_build_component_ref (x, field);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      /* For VLAs, recurse on the pointer stored in the value expr.  */
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	   && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
    {
      /* #pragma omp simd isn't a worksharing construct, and can reference
	 even private vars in its linear etc. clauses.  */
      x = NULL_TREE;
      if (ctx->outer && is_taskreg_ctx (ctx))
	x = lookup_decl (var, ctx->outer);
      else if (ctx->outer)
	x = maybe_lookup_decl_in_outer_ctx (var, ctx);
      if (x == NULL_TREE)
	x = var;
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return omp_build_component_ref (ctx->sender_decl, field);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.
*/

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  /* MASK bit 1 = enter into field_map, bit 2 = enter into sfield_map,
     bit 4 = double-indirect pointer for array types (presumably for
     offloaded array sections — confirm against callers).  */
  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
  gcc_assert ((mask & 3) == 3
	      || !is_gimple_omp_oacc (ctx->stmt));

  type = TREE_TYPE (var);
  if (mask & 4)
    {
      gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
      type = build_pointer_type (build_pointer_type (type));
    }
  else if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      /* First single-map field: lazily create srecord_type mirroring
	 all fields accumulated in record_type so far.  */
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}

/* Create a context-local copy of VAR and record the mapping in CTX.  */

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      /* Variable-sized decl: remap both size expressions, falling back
	 to the (already remapped) type's sizes on error.  */
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.
*/

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  /* CB is the leading member of omp_context, so this up-cast is safe
     (see the comment on omp_context::cb).  */
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  /* Walk outward until a parallel/task context is reached, returning
     any existing mapping found along the way.  */
  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}

/* Create a new parallel region starting at STMT inside region PARENT.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}

/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      /* Inherit the copy-body state from the enclosing context; the
	 reduction map is shared (deleted only by its owner, see
	 delete_omp_context).  */
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
      ctx->reduction_map = outer_ctx->reduction_map;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node::get (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = new hash_map<tree, tree>;

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.
*/ static void finalize_task_copyfn (gomp_task *task_stmt) { struct function *child_cfun; tree child_fn; gimple_seq seq = NULL, new_seq; gbind *bind; child_fn = gimple_omp_task_copy_fn (task_stmt); if (child_fn == NULL_TREE) return; child_cfun = DECL_STRUCT_FUNCTION (child_fn); DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties; push_cfun (child_cfun); bind = gimplify_body (child_fn, false); gimple_seq_add_stmt (&seq, bind); new_seq = maybe_catch_exception (seq); if (new_seq != seq) { bind = gimple_build_bind (NULL, new_seq, NULL); seq = NULL; gimple_seq_add_stmt (&seq, bind); } gimple_set_body (child_fn, seq); pop_cfun (); /* Inform the callgraph about the new function. */ cgraph_node::add_new_function (child_fn, false); cgraph_node::get (child_fn)->parallelized_function = 1; } /* Destroy a omp_context data structures. Called through the splay tree value delete callback. */ static void delete_omp_context (splay_tree_value value) { omp_context *ctx = (omp_context *) value; delete ctx->cb.decl_map; if (ctx->field_map) splay_tree_delete (ctx->field_map); if (ctx->sfield_map) splay_tree_delete (ctx->sfield_map); /* Reduction map is copied to nested contexts, so only delete it in the owner. */ if (ctx->reduction_map && gimple_code (ctx->stmt) == GIMPLE_OMP_TARGET && is_gimple_omp_offloaded (ctx->stmt) && is_gimple_omp_oacc (ctx->stmt)) splay_tree_delete (ctx->reduction_map); /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before it produces corrupt debug information. */ if (ctx->record_type) { tree t; for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t)) DECL_ABSTRACT_ORIGIN (t) = NULL; } if (ctx->srecord_type) { tree t; for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t)) DECL_ABSTRACT_ORIGIN (t) = NULL; } if (is_task_ctx (ctx)) finalize_task_copyfn (as_a <gomp_task *> (ctx->stmt)); XDELETE (ctx); } /* Fix up RECEIVER_DECL with a type that has been remapped to the child context. 
*/ static void fixup_child_record_type (omp_context *ctx) { tree f, type = ctx->record_type; /* ??? It isn't sufficient to just call remap_type here, because variably_modified_type_p doesn't work the way we expect for record types. Testing each field for whether it needs remapping and creating a new record by hand works, however. */ for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f)) if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn)) break; if (f) { tree name, new_fields = NULL; type = lang_hooks.types.make_type (RECORD_TYPE); name = DECL_NAME (TYPE_NAME (ctx->record_type)); name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl), TYPE_DECL, name, type); TYPE_NAME (type) = name; for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f)) { tree new_f = copy_node (f); DECL_CONTEXT (new_f) = type; TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb); DECL_CHAIN (new_f) = new_fields; walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL); walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &ctx->cb, NULL); walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r, &ctx->cb, NULL); new_fields = new_f; /* Arrange to be able to look up the receiver field given the sender field. */ splay_tree_insert (ctx->field_map, (splay_tree_key) f, (splay_tree_value) new_f); } TYPE_FIELDS (type) = nreverse (new_fields); layout_type (type); } TREE_TYPE (ctx->receiver_decl) = build_qualified_type (build_reference_type (type), TYPE_QUAL_RESTRICT); } /* Instantiate decls as necessary in CTX to satisfy the data sharing specified by CLAUSES. 
*/

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  /* First pass: install sender-record fields and/or local copies for
     each clause decl.  The field mask bits passed to install_var_field
     are: 1 = field in srecord, 2 = field in record, 3 = both, and bit 4
     (mask 7) requests a by-reference array field.  */
  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    {
	      /* Global variables don't need to be copied, the receiver
		 side will use them directly.  */
	      tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
	      if (is_global_var (odecl))
		break;
	      insert_decl_map (&ctx->cb, decl, odecl);
	      break;
	    }
	  gcc_assert (is_taskreg_ctx (ctx));
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied, the receiver side
	     will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	  if (is_gimple_omp_oacc (ctx->stmt))
	    {
	      /* NOTE(review): message capitalization differs from the
		 "Clause not supported yet" sorry below; GCC diagnostic
		 convention is lowercase — worth unifying.  */
	      sorry ("clause not supported yet");
	      break;
	    }
	  /* FALLTHRU */
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      /* VLAs only get an srecord field in task contexts.  */
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  if (is_gimple_omp_oacc (ctx->stmt)
	      && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
	    {
	      /* Create a decl for the reduction array.  */
	      tree var = OMP_CLAUSE_DECL (c);
	      tree type = get_base_type (var);
	      tree ptype = build_pointer_type (type);
	      tree array = create_tmp_var (ptype,
					   oacc_get_reduction_array_id (var));
	      /* NOTE(review): this C shadows the clause iterator C of the
		 enclosing loop — easy to misread, though apparently
		 intentional here.  */
	      omp_context *c = (ctx->field_map ? ctx : ctx->outer);
	      install_var_field (array, true, 3, c);
	      install_var_local (array, c);

	      /* Insert it into the current context.  */
	      splay_tree_insert (ctx->reduction_map, (splay_tree_key)
				 oacc_get_reduction_array_id (var),
				 (splay_tree_value) array);
	      splay_tree_insert (ctx->reduction_map,
				 (splay_tree_key) array,
				 (splay_tree_value) array);
	    }
	  break;

	case OMP_CLAUSE__LOOPTEMP_:
	  gcc_assert (is_parallel_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  install_var_field (decl, false, 3, ctx);
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	/* Clauses whose single operand is an expression evaluated in the
	   enclosing context.  */
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE__CILK_FOR_COUNT_:
	case OMP_CLAUSE_NUM_GANGS:
	case OMP_CLAUSE_NUM_WORKERS:
	case OMP_CLAUSE_VECTOR_LENGTH:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE_MAP:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
	  decl = OMP_CLAUSE_DECL (c);
	  /* Global variables with "omp declare target" attribute
	     don't need to be copied, the receiver side will use them
	     directly.  */
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && DECL_P (decl)
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && varpool_node::get_create (decl)->offloadable)
	    break;
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER)
	    {
	      /* Ignore GOMP_MAP_POINTER kind for arrays in regions that
		 are not offloaded; there is nothing to map for those.  */
	      if (!is_gimple_omp_offloaded (ctx->stmt)
		  && !POINTER_TYPE_P (TREE_TYPE (decl))
		  && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
		break;
	    }
	  if (DECL_P (decl))
	    {
	      if (DECL_SIZE (decl)
		  && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  /* Variable-sized decl: map through its DECL_VALUE_EXPR
		     pointer instead.  */
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  install_var_field (decl2, true, 3, ctx);
		  install_var_local (decl2, ctx);
		  install_var_local (decl, ctx);
		}
	      else
		{
		  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
		      && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
		      && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
		      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
		    install_var_field (decl, true, 7, ctx);
		  else
		    install_var_field (decl, true, 3, ctx);
		  if (is_gimple_omp_offloaded (ctx->stmt))
		    install_var_local (decl, ctx);
		}
	    }
	  else
	    {
	      /* Non-decl map operand (e.g. an array section).  */
	      tree base = get_base_address (decl);
	      tree nc = OMP_CLAUSE_CHAIN (c);
	      if (DECL_P (base)
		  && nc != NULL_TREE
		  && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
		  && OMP_CLAUSE_DECL (nc) == base
		  && OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_POINTER
		  && integer_zerop (OMP_CLAUSE_SIZE (nc)))
		{
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
		}
	      else
		{
		  if (ctx->outer)
		    {
		      scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
		      decl = OMP_CLAUSE_DECL (c);
		    }
		  gcc_assert (!splay_tree_lookup (ctx->field_map,
						  (splay_tree_key) decl));
		  tree field
		    = build_decl (OMP_CLAUSE_LOCATION (c),
				  FIELD_DECL, NULL_TREE, ptr_type_node);
		  DECL_ALIGN (field) = TYPE_ALIGN (ptr_type_node);
		  insert_field_into_struct (ctx->record_type, field);
		  splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
				     (splay_tree_value) field);
		}
	    }
	  break;

	/* Clauses with nothing to install in the first pass.  */
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_ASYNC:
	case OMP_CLAUSE_WAIT:
	case OMP_CLAUSE_GANG:
	case OMP_CLAUSE_WORKER:
	case OMP_CLAUSE_VECTOR:
	  break;

	case OMP_CLAUSE_ALIGNED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_global_var (decl)
	      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_DEVICE_RESIDENT:
	case OMP_CLAUSE_USE_DEVICE:
	case OMP_CLAUSE__CACHE_:
	case OMP_CLAUSE_INDEPENDENT:
	case OMP_CLAUSE_AUTO:
	case OMP_CLAUSE_SEQ:
	  sorry ("Clause not supported yet");
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Second pass: now that every needed field/copy exists, fix up the
     remapped decls and note which clauses carry gimple sequences that
     still need scanning.  */
  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	  if (is_gimple_omp_oacc (ctx->stmt))
	    {
	      sorry ("clause not supported yet");
	      break;
	    }
	  /* FALLTHRU */
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
		   && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_MAP:
	  if (!is_gimple_omp_offloaded (ctx->stmt))
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (DECL_P (decl)
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && varpool_node::get_create (decl)->offloadable)
	    break;
	  if (DECL_P (decl))
	    {
	      if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
		  && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
		  && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
		{
		  tree new_decl = lookup_decl (decl, ctx);
		  TREE_TYPE (new_decl)
		    = remap_type (TREE_TYPE (decl), &ctx->cb);
		}
	      else if (DECL_SIZE (decl)
		       && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  fixup_remapped_decl (decl2, ctx, false);
		  fixup_remapped_decl (decl, ctx, true);
		}
	      else
		fixup_remapped_decl (decl, ctx, false);
	    }
	  break;

	/* Clauses with no second-pass work.  */
	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_ALIGNED:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE__LOOPTEMP_:
	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE__CILK_FOR_COUNT_:
	case OMP_CLAUSE_ASYNC:
	case OMP_CLAUSE_WAIT:
	case OMP_CLAUSE_NUM_GANGS:
	case OMP_CLAUSE_NUM_WORKERS:
	case OMP_CLAUSE_VECTOR_LENGTH:
	case OMP_CLAUSE_GANG:
	case OMP_CLAUSE_WORKER:
	case OMP_CLAUSE_VECTOR:
	  break;

	case OMP_CLAUSE_DEVICE_RESIDENT:
	case OMP_CLAUSE_USE_DEVICE:
	case OMP_CLAUSE__CACHE_:
	case OMP_CLAUSE_INDEPENDENT:
	case OMP_CLAUSE_AUTO:
	case OMP_CLAUSE_SEQ:
	  sorry ("Clause not supported yet");
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Third pass: scan the gimple sequences hiding inside reduction,
     lastprivate and linear clauses (none of which exist for OpenACC).  */
  gcc_checking_assert (!scan_array_reductions
		       || !is_gimple_omp_oacc (ctx->stmt));
  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	       && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
}

/* Create a new name for omp child function.  Returns an identifier.  If
   IS_CILK_FOR is true then the suffix for the child function is
   "_cilk_for_fn."  */

static tree
create_omp_child_function_name (bool task_copy, bool is_cilk_for)
{
  if (is_cilk_for)
    return clone_function_name (current_function_decl, "_cilk_for_fn");
  return clone_function_name (current_function_decl,
			      task_copy ? "_omp_cpyfn" : "_omp_fn");
}

/* Returns the type of the induction variable for the child function for
   _Cilk_for and the types for _high and _low variables based on TYPE.  */

static tree
cilk_for_check_loop_diff_type (tree type)
{
  /* Pick the narrowest standard integer type (32 or 64 bit, signed or
     unsigned to match TYPE) that can hold the loop difference.  */
  if (TYPE_PRECISION (type) <= TYPE_PRECISION (uint32_type_node))
    {
      if (TYPE_UNSIGNED (type))
	return uint32_type_node;
      else
	return integer_type_node;
    }
  else
    {
      if (TYPE_UNSIGNED (type))
	return uint64_type_node;
      else
	return long_long_integer_type_node;
    }
}

/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  /* A combined parallel _Cilk_for carries a _CILK_FOR_COUNT_ clause
     whose operand type determines the extra __low/__high parameters.  */
  tree cilk_for_count
    = (flag_cilkplus && gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
      ? find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
			 OMP_CLAUSE__CILK_FOR_COUNT_) : NULL_TREE;
  tree cilk_var_type = NULL_TREE;

  name = create_omp_child_function_name (task_copy,
					 cilk_for_count != NULL_TREE);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
  else if (cilk_for_count)
    {
      type = TREE_TYPE (OMP_CLAUSE_OPERAND (cilk_for_count, 0));
      cilk_var_type = cilk_for_check_loop_diff_type (type);
      type = build_function_type_list (void_type_node, ptr_type_node,
				       cilk_var_type, cilk_var_type,
				       NULL_TREE);
    }
  else
    type = build_function_type_list (void_type_node, ptr_type_node,
				     NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt), FUNCTION_DECL, name, type);

  gcc_checking_assert (!is_gimple_omp_oacc (ctx->stmt)
		       || !task_copy);
  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);
  /* The child function is offloadable if the enclosing function is, or
     if any enclosing OMP context is an offloaded region.  */
  if (cgraph_node::get (current_function_decl)->offloadable)
    cgraph_node::get_create (decl)->offloadable = 1;
  else
    {
      omp_context *octx;
      for (octx = ctx; octx; octx = octx->outer)
	if (is_gimple_omp_offloaded (octx->stmt))
	  {
	    cgraph_node::get_create (decl)->offloadable = 1;
#ifdef ENABLE_OFFLOADING
	    g->have_offload = true;
#endif
	    break;
	  }
    }

  if (cgraph_node::get_create (decl)->offloadable
      && !lookup_attribute ("omp declare target",
			    DECL_ATTRIBUTES (current_function_decl)))
    DECL_ATTRIBUTES (decl)
      = tree_cons (get_identifier ("omp target entrypoint"),
		   NULL_TREE, DECL_ATTRIBUTES (decl));

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  /* _Cilk_for's child function requires two extra parameters called
     __low and __high that are set by the Cilk runtime when it calls
     this function.  */
  if (cilk_for_count)
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier ("__high"), cilk_var_type);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;

      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier ("__low"), cilk_var_type);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* The receiver parameter carrying the sender record.  */
  tree data_name = get_identifier (".omp_data_i");
  t = build_decl (DECL_SOURCE_LOCATION (decl), PARM_DECL, data_name,
		  ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  if (cilk_for_count)
    DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      /* Task copy functions take a second (output) record argument.  */
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}

/* Callback for walk_gimple_seq.  Check if combined parallel
   contains gimple_omp_for_combined_into_p OMP_FOR.
*/

static tree
find_combined_for (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
		   struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
      if (gimple_omp_for_combined_into_p (stmt)
	  && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
	{
	  /* Record the match and return non-NULL to stop the walk.  */
	  wi->info = stmt;
	  return integer_zero_node;
	}
      break;
    default:
      break;
    }
  return NULL;
}

/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gomp_parallel *stmt = as_a <gomp_parallel *> (gsi_stmt (*gsi));

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
			  OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  if (gimple_omp_parallel_combined_p (stmt))
    {
      struct walk_stmt_info wi;

      memset (&wi, 0, sizeof (wi));
      wi.val_only = true;
      walk_gimple_seq (gimple_omp_body (stmt),
		       find_combined_for, NULL, &wi);
      if (wi.info)
	{
	  gomp_for *for_stmt = as_a <gomp_for *> ((gimple) wi.info);
	  struct omp_for_data fd;
	  extract_omp_for_data (for_stmt, &fd, NULL);
	  /* We need two temporaries with fd.loop.v type (istart/iend)
	     and then (fd.collapse - 1) temporaries with the same
	     type for count2 ... countN-1 vars if not constant.  */
	  size_t count = 2, i;
	  tree type = fd.iter_type;
	  if (fd.collapse > 1
	      && TREE_CODE (fd.loop.n2) != INTEGER_CST)
	    count += fd.collapse - 1;
	  for (i = 0; i < count; i++)
	    {
	      tree temp = create_tmp_var (type);
	      tree c = build_omp_clause (UNKNOWN_LOCATION,
					 OMP_CLAUSE__LOOPTEMP_);
	      insert_decl_map (&outer_ctx->cb, temp, temp);
	      OMP_CLAUSE_DECL (c) = temp;
	      OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
	      gimple_omp_parallel_set_clauses (stmt, c);
	    }
	}
    }

  ctx = new_omp_context (stmt, outer_ctx);
  taskreg_contexts.safe_push (ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  /* Nothing ended up in the sender record: no data to marshal.  */
  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
}

/* Scan an OpenMP task directive.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gomp_task *stmt = as_a <gomp_task *> (gsi_stmt (*gsi));

  /* Ignore task directives with empty bodies.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt)))
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  taskreg_contexts.safe_push (ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  /* An srecord means firstprivate copies need a task copy function.  */
  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (gimple_location (stmt),
			 TYPE_DECL, name, ctx->srecord_type);
      DECL_ARTIFICIAL (name) = 1;
      DECL_NAMELESS (name) = 1;
      TYPE_NAME (ctx->srecord_type) = name;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      /* Empty sender record: tell the runtime size 0, align 1.  */
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
}

/* If any decls have been made addressable during scan_omp,
   adjust their fields if needed, and layout record types
   of parallel/task constructs.  */

static void
finish_taskreg_scan (omp_context *ctx)
{
  if (ctx->record_type == NULL_TREE)
    return;

  /* If any task_shared_vars were needed, verify all
     OMP_CLAUSE_SHARED clauses on GIMPLE_OMP_{PARALLEL,TASK}
     statements if use_pointer_for_field hasn't changed
     because of that.  If it did, update field types now.  */
  if (task_shared_vars)
    {
      tree c;

      for (c = gimple_omp_taskreg_clauses (ctx->stmt);
	   c; c = OMP_CLAUSE_CHAIN (c))
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)
	  {
	    tree decl = OMP_CLAUSE_DECL (c);

	    /* Global variables don't need to be copied,
	       the receiver side will use them directly.  */
	    if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	      continue;
	    if (!bitmap_bit_p (task_shared_vars, DECL_UID (decl))
		|| !use_pointer_for_field (decl, ctx))
	      continue;
	    tree field = lookup_field (decl, ctx);
	    if (TREE_CODE (TREE_TYPE (field)) == POINTER_TYPE
		&& TREE_TYPE (TREE_TYPE (field)) == TREE_TYPE (decl))
	      continue;
	    /* The decl became addressable: pass it by pointer instead,
	       and bump the record alignment if needed.  */
	    TREE_TYPE (field) = build_pointer_type (TREE_TYPE (decl));
	    TREE_THIS_VOLATILE (field) = 0;
	    DECL_USER_ALIGN (field) = 0;
	    DECL_ALIGN (field) = TYPE_ALIGN (TREE_TYPE (field));
	    if (TYPE_ALIGN (ctx->record_type) < DECL_ALIGN (field))
	      TYPE_ALIGN (ctx->record_type) = DECL_ALIGN (field);
	    if (ctx->srecord_type)
	      {
		tree sfield = lookup_sfield (decl, ctx);
		TREE_TYPE (sfield) = TREE_TYPE (field);
		TREE_THIS_VOLATILE (sfield) = 0;
		DECL_USER_ALIGN (sfield) = 0;
		DECL_ALIGN (sfield) = DECL_ALIGN (field);
		if (TYPE_ALIGN (ctx->srecord_type) < DECL_ALIGN (sfield))
		  TYPE_ALIGN (ctx->srecord_type) = DECL_ALIGN (sfield);
	      }
	  }
    }

  if (gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
  else
    {
      location_t loc = gimple_location (ctx->stmt);
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
	if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
	    || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
	  {
	    /* Unlink the VLA field and append it to the VLA_FIELDS
	       chain.  */
	    *q = *p;
	    *p = TREE_CHAIN (*p);
	    TREE_CHAIN (*q) = NULL_TREE;
	    q = &TREE_CHAIN (*q);
	  }
	else
	  p = &DECL_CHAIN (*p);
      *p = vla_fields;
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
	layout_type (ctx->srecord_type);
      /* Record the final size/alignment on the task statement for the
	 runtime.  */
      tree t = fold_convert_loc (loc, long_integer_type_node,
				 TYPE_SIZE_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_size (ctx->stmt, t);
      t = build_int_cst (long_integer_type_node,
			 TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (ctx->stmt, t);
    }
}

/* Return the innermost enclosing GIMPLE_OMP_TARGET context of CTX;
   asserts that one exists.  */

static omp_context *
enclosing_target_ctx (omp_context *ctx)
{
  while (ctx != NULL
	 && gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
    ctx = ctx->outer;
  gcc_assert (ctx != NULL);
  return ctx;
}

/* Return true if STMT is an OpenACC parallel/kernels target region or
   an OpenACC loop.  */

static bool
oacc_loop_or_target_p (gimple stmt)
{
  enum gimple_code outer_type = gimple_code (stmt);
  return ((outer_type == GIMPLE_OMP_TARGET
	   && ((gimple_omp_target_kind (stmt)
		== GF_OMP_TARGET_KIND_OACC_PARALLEL)
	       || (gimple_omp_target_kind (stmt)
		   == GF_OMP_TARGET_KIND_OACC_KERNELS)))
	  || (outer_type == GIMPLE_OMP_FOR
	      && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_OACC_LOOP));
}

/* Scan a GIMPLE_OMP_FOR.  */

static void
scan_omp_for (gomp_for *stmt, omp_context *outer_ctx)
{
  enum gimple_code outer_type = GIMPLE_ERROR_MARK;
  omp_context *ctx;
  size_t i;
  tree clauses = gimple_omp_for_clauses (stmt);

  if (outer_ctx)
    outer_type = gimple_code (outer_ctx->stmt);

  ctx = new_omp_context (stmt, outer_ctx);

  if (is_gimple_omp_oacc (stmt))
    {
      /* Accumulate gang/worker/vector usage for OpenACC nesting
	 diagnostics; gwv_this tracks this level, gwv_below what the
	 nested loops use.  */
      if (outer_ctx && outer_type == GIMPLE_OMP_FOR)
	ctx->gwv_this = outer_ctx->gwv_this;
      for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
	{
	  int val;
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_GANG)
	    val = MASK_GANG;
	  else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_WORKER)
	    val = MASK_WORKER;
	  else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_VECTOR)
	    val = MASK_VECTOR;
	  else
	    continue;
	  ctx->gwv_this |= val;
	  if (!outer_ctx)
	    {
	      /* Skip; not nested inside a region.  */
	      continue;
	    }
	  if (!oacc_loop_or_target_p (outer_ctx->stmt))
	    {
	      /* Skip; not nested inside an OpenACC region.  */
	      continue;
	    }
	  if (outer_type == GIMPLE_OMP_FOR)
	    outer_ctx->gwv_below |= val;
	  if (OMP_CLAUSE_OPERAND (c, 0) != NULL_TREE)
	    {
	      omp_context *enclosing = enclosing_target_ctx (outer_ctx);
	      if (gimple_omp_target_kind (enclosing->stmt)
		  == GF_OMP_TARGET_KIND_OACC_PARALLEL)
		error_at (gimple_location (stmt),
			  "no arguments allowed to gang, worker and vector clauses inside parallel");
	    }
	}
    }

  scan_sharing_clauses (clauses, ctx);

  scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
    }
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (is_gimple_omp_oacc (stmt))
    {
      /* Diagnose repeated or mis-ordered gang/worker/vector in the
	 loop nest, then propagate what we saw upward.  */
      if (ctx->gwv_this & ctx->gwv_below)
	error_at (gimple_location (stmt),
		  "gang, worker and vector may occur only once in a loop nest");
      else if (ctx->gwv_below != 0
	       && ctx->gwv_this > ctx->gwv_below)
	error_at (gimple_location (stmt),
		  "gang, worker and vector must occur in this order in a loop nest");
      if (outer_ctx && outer_type == GIMPLE_OMP_FOR)
	outer_ctx->gwv_below |= ctx->gwv_below;
    }
}

/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (gomp_sections *stmt, omp_context *outer_ctx)
{
  omp_context *ctx;

  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);
}

/* Scan an OpenMP single directive.
*/

static void
scan_omp_single (gomp_single *stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  /* Drop the copyprivate record if nothing ended up in it.  */
  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}

/* Scan a GIMPLE_OMP_TARGET.  */

static void
scan_omp_target (gomp_target *stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  bool offloaded = is_gimple_omp_offloaded (stmt);
  tree clauses = gimple_omp_target_clauses (stmt);

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_t");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  if (offloaded)
    {
      if (is_gimple_omp_oacc (stmt))
	ctx->reduction_map = splay_tree_new (splay_tree_compare_pointers,
					     0, 0);

      create_omp_child_function (ctx, false);
      gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
    }

  if (is_gimple_omp_oacc (stmt))
    {
      /* Record which parallelism dimensions the region configures, for
	 later nesting diagnostics in scan_omp_for.  */
      for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
	{
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NUM_GANGS)
	    ctx->gwv_this |= MASK_GANG;
	  else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NUM_WORKERS)
	    ctx->gwv_this |= MASK_WORKER;
	  else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_VECTOR_LENGTH)
	    ctx->gwv_this |= MASK_VECTOR;
	}
    }

  scan_sharing_clauses (clauses, ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      /* Fields were prepended during scanning; restore source order.  */
      TYPE_FIELDS (ctx->record_type)
	= nreverse (TYPE_FIELDS (ctx->record_type));
#ifdef ENABLE_CHECKING
      tree field;
      unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
      for (field = TYPE_FIELDS (ctx->record_type);
	   field;
	   field = DECL_CHAIN (field))
	gcc_assert (DECL_ALIGN (field) == align);
#endif
      layout_type (ctx->record_type);
      if (offloaded)
	fixup_child_record_type (ctx);
    }
}

/* Scan an OpenMP teams directive.  */

static void
scan_omp_teams (gomp_teams *stmt, omp_context *outer_ctx)
{
  omp_context *ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);
}

/* Check nesting restrictions.  */

static bool
check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
{
  /* No nesting of non-OpenACC STMT (that is, an OpenMP one, or a GOMP
     builtin) inside an OpenACC CTX.  */
  if (!(is_gimple_omp (stmt)
	&& is_gimple_omp_oacc (stmt)))
    {
      for (omp_context *ctx_ = ctx; ctx_ != NULL; ctx_ = ctx_->outer)
	if (is_gimple_omp (ctx_->stmt)
	    && is_gimple_omp_oacc (ctx_->stmt))
	  {
	    error_at (gimple_location (stmt),
		      "non-OpenACC construct inside of OpenACC region");
	    return false;
	  }
    }
  if (ctx != NULL)
    {
      if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	  && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
	{
	  error_at (gimple_location (stmt),
		    "OpenMP constructs may not be nested inside simd region");
	  return false;
	}
      else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	{
	  if ((gimple_code (stmt) != GIMPLE_OMP_FOR
	       || (gimple_omp_for_kind (stmt)
		   != GF_OMP_FOR_KIND_DISTRIBUTE))
	      && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
	    {
	      error_at (gimple_location (stmt),
			"only distribute or parallel constructs are allowed to "
			"be closely nested inside teams construct");
	      return false;
	    }
	}
    }
  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_FOR:
      if (gimple_omp_for_kind (stmt) & GF_OMP_FOR_SIMD)
	return true;
      if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
	{
	  if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
	    {
	      error_at (gimple_location (stmt),
			"distribute construct must be closely nested inside "
			"teams construct");
	      return false;
	    }
	  return true;
	}
      /* FALLTHRU */
    case GIMPLE_CALL:
      /* Validate #pragma omp cancel / cancellation point placement; the
	 first call argument encodes the construct kind (1 = parallel,
	 2 = for, 4 = sections, 8 = taskgroup).  */
      if (is_gimple_call (stmt)
	  && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
	      == BUILT_IN_GOMP_CANCEL
	      || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
		 == BUILT_IN_GOMP_CANCELLATION_POINT))
	{
	  const char *bad = NULL;
	  const char *kind = NULL;
	  if (ctx == NULL)
	    {
	      error_at (gimple_location (stmt), "orphaned %qs construct",
			DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
			== BUILT_IN_GOMP_CANCEL
			? "#pragma omp cancel"
			: "#pragma omp cancellation point");
	      return false;
	    }
	  switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0))
		  ? tree_to_shwi (gimple_call_arg (stmt, 0))
		  : 0)
	    {
	    case 1:
	      if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
		bad = "#pragma omp parallel";
	      else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
		       == BUILT_IN_GOMP_CANCEL
		       && !integer_zerop (gimple_call_arg (stmt, 1)))
		ctx->cancellable = true;
	      kind = "parallel";
	      break;
	    case 2:
	      if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
		  || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
		bad = "#pragma omp for";
	      else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
		       == BUILT_IN_GOMP_CANCEL
		       && !integer_zerop (gimple_call_arg (stmt, 1)))
		{
		  ctx->cancellable = true;
		  if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
				       OMP_CLAUSE_NOWAIT))
		    warning_at (gimple_location (stmt), 0,
				"%<#pragma omp cancel for%> inside "
				"%<nowait%> for construct");
		  if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
				       OMP_CLAUSE_ORDERED))
		    warning_at (gimple_location (stmt), 0,
				"%<#pragma omp cancel for%> inside "
				"%<ordered%> for construct");
		}
	      kind = "for";
	      break;
	    case 4:
	      if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
		  && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
		bad = "#pragma omp sections";
	      else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
		       == BUILT_IN_GOMP_CANCEL
		       && !integer_zerop (gimple_call_arg (stmt, 1)))
		{
		  if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
		    {
		      ctx->cancellable = true;
		      if (find_omp_clause (gimple_omp_sections_clauses
								(ctx->stmt),
					   OMP_CLAUSE_NOWAIT))
			warning_at (gimple_location (stmt), 0,
				    "%<#pragma omp cancel sections%> inside "
				    "%<nowait%> sections construct");
		    }
		  else
		    {
		      /* Inside a section: the cancellable flag belongs on
			 the enclosing sections context.  */
		      gcc_assert (ctx->outer
				  && gimple_code (ctx->outer->stmt)
				     == GIMPLE_OMP_SECTIONS);
		      ctx->outer->cancellable = true;
		      if (find_omp_clause (gimple_omp_sections_clauses
							(ctx->outer->stmt),
					   OMP_CLAUSE_NOWAIT))
			warning_at (gimple_location (stmt), 0,
				    "%<#pragma omp cancel sections%> inside "
				    "%<nowait%> sections construct");
		    }
		}
	      kind = "sections";
	      break;
	    case 8:
	      if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
		bad = "#pragma omp task";
	      else
		ctx->cancellable = true;
	      kind = "taskgroup";
	      break;
	    default:
	      error_at (gimple_location (stmt), "invalid arguments");
	      return false;
	    }
	  if (bad)
	    {
	      error_at (gimple_location (stmt),
			"%<%s %s%> construct not closely nested inside of %qs",
			DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
			== BUILT_IN_GOMP_CANCEL ?
"#pragma omp cancel" : "#pragma omp cancellation point", kind, bad); return false; } } /* FALLTHRU */ case GIMPLE_OMP_SECTIONS: case GIMPLE_OMP_SINGLE: for (; ctx != NULL; ctx = ctx->outer) switch (gimple_code (ctx->stmt)) { case GIMPLE_OMP_FOR: case GIMPLE_OMP_SECTIONS: case GIMPLE_OMP_SINGLE: case GIMPLE_OMP_ORDERED: case GIMPLE_OMP_MASTER: case GIMPLE_OMP_TASK: case GIMPLE_OMP_CRITICAL: if (is_gimple_call (stmt)) { if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt)) != BUILT_IN_GOMP_BARRIER) return true; error_at (gimple_location (stmt), "barrier region may not be closely nested inside " "of work-sharing, critical, ordered, master or " "explicit task region"); return false; } error_at (gimple_location (stmt), "work-sharing region may not be closely nested inside " "of work-sharing, critical, ordered, master or explicit " "task region"); return false; case GIMPLE_OMP_PARALLEL: return true; default: break; } break; case GIMPLE_OMP_MASTER: for (; ctx != NULL; ctx = ctx->outer) switch (gimple_code (ctx->stmt)) { case GIMPLE_OMP_FOR: case GIMPLE_OMP_SECTIONS: case GIMPLE_OMP_SINGLE: case GIMPLE_OMP_TASK: error_at (gimple_location (stmt), "master region may not be closely nested inside " "of work-sharing or explicit task region"); return false; case GIMPLE_OMP_PARALLEL: return true; default: break; } break; case GIMPLE_OMP_ORDERED: for (; ctx != NULL; ctx = ctx->outer) switch (gimple_code (ctx->stmt)) { case GIMPLE_OMP_CRITICAL: case GIMPLE_OMP_TASK: error_at (gimple_location (stmt), "ordered region may not be closely nested inside " "of critical or explicit task region"); return false; case GIMPLE_OMP_FOR: if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt), OMP_CLAUSE_ORDERED) == NULL) { error_at (gimple_location (stmt), "ordered region must be closely nested inside " "a loop region with an ordered clause"); return false; } return true; case GIMPLE_OMP_PARALLEL: error_at (gimple_location (stmt), "ordered region must be closely nested inside " "a loop region with 
an ordered clause"); return false; default: break; } break; case GIMPLE_OMP_CRITICAL: { tree this_stmt_name = gimple_omp_critical_name (as_a <gomp_critical *> (stmt)); for (; ctx != NULL; ctx = ctx->outer) if (gomp_critical *other_crit = dyn_cast <gomp_critical *> (ctx->stmt)) if (this_stmt_name == gimple_omp_critical_name (other_crit)) { error_at (gimple_location (stmt), "critical region may not be nested inside a critical " "region with the same name"); return false; } } break; case GIMPLE_OMP_TEAMS: if (ctx == NULL || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION) { error_at (gimple_location (stmt), "teams construct not closely nested inside of target " "region"); return false; } break; case GIMPLE_OMP_TARGET: for (; ctx != NULL; ctx = ctx->outer) { if (gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET) { if (is_gimple_omp (stmt) && is_gimple_omp_oacc (stmt) && is_gimple_omp (ctx->stmt)) { error_at (gimple_location (stmt), "OpenACC construct inside of non-OpenACC region"); return false; } continue; } const char *stmt_name, *ctx_stmt_name; switch (gimple_omp_target_kind (stmt)) { case GF_OMP_TARGET_KIND_REGION: stmt_name = "target"; break; case GF_OMP_TARGET_KIND_DATA: stmt_name = "target data"; break; case GF_OMP_TARGET_KIND_UPDATE: stmt_name = "target update"; break; case GF_OMP_TARGET_KIND_OACC_PARALLEL: stmt_name = "parallel"; break; case GF_OMP_TARGET_KIND_OACC_KERNELS: stmt_name = "kernels"; break; case GF_OMP_TARGET_KIND_OACC_DATA: stmt_name = "data"; break; case GF_OMP_TARGET_KIND_OACC_UPDATE: stmt_name = "update"; break; case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA: stmt_name = "enter/exit data"; break; default: gcc_unreachable (); } switch (gimple_omp_target_kind (ctx->stmt)) { case GF_OMP_TARGET_KIND_REGION: ctx_stmt_name = "target"; break; case GF_OMP_TARGET_KIND_DATA: ctx_stmt_name = "target data"; break; case GF_OMP_TARGET_KIND_OACC_PARALLEL: ctx_stmt_name = "parallel"; break; case 
GF_OMP_TARGET_KIND_OACC_KERNELS: ctx_stmt_name = "kernels"; break; case GF_OMP_TARGET_KIND_OACC_DATA: ctx_stmt_name = "data"; break; default: gcc_unreachable (); } /* OpenACC/OpenMP mismatch? */ if (is_gimple_omp_oacc (stmt) != is_gimple_omp_oacc (ctx->stmt)) { error_at (gimple_location (stmt), "%s %s construct inside of %s %s region", (is_gimple_omp_oacc (stmt) ? "OpenACC" : "OpenMP"), stmt_name, (is_gimple_omp_oacc (ctx->stmt) ? "OpenACC" : "OpenMP"), ctx_stmt_name); return false; } if (is_gimple_omp_offloaded (ctx->stmt)) { /* No GIMPLE_OMP_TARGET inside offloaded OpenACC CTX. */ if (is_gimple_omp_oacc (ctx->stmt)) { error_at (gimple_location (stmt), "%s construct inside of %s region", stmt_name, ctx_stmt_name); return false; } else { gcc_checking_assert (!is_gimple_omp_oacc (stmt)); warning_at (gimple_location (stmt), 0, "%s construct inside of %s region", stmt_name, ctx_stmt_name); } } } break; default: break; } return true; } /* Helper function scan_omp. Callback for walk_tree or operators in walk_gimple_stmt used to scan for OMP directives in TP. */ static tree scan_omp_1_op (tree *tp, int *walk_subtrees, void *data) { struct walk_stmt_info *wi = (struct walk_stmt_info *) data; omp_context *ctx = (omp_context *) wi->info; tree t = *tp; switch (TREE_CODE (t)) { case VAR_DECL: case PARM_DECL: case LABEL_DECL: case RESULT_DECL: if (ctx) *tp = remap_decl (t, &ctx->cb); break; default: if (ctx && TYPE_P (t)) *tp = remap_type (t, &ctx->cb); else if (!DECL_P (t)) { *walk_subtrees = 1; if (ctx) { tree tem = remap_type (TREE_TYPE (t), &ctx->cb); if (tem != TREE_TYPE (t)) { if (TREE_CODE (t) == INTEGER_CST) *tp = wide_int_to_tree (tem, t); else TREE_TYPE (t) = tem; } } } break; } return NULL_TREE; } /* Return true if FNDECL is a setjmp or a longjmp. 
*/

static bool
setjmp_or_longjmp_p (const_tree fndecl)
{
  /* Recognized either as the normal builtins ...  */
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
      && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SETJMP
          || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_LONGJMP))
    return true;

  /* ... or by plain function name.  */
  tree declname = DECL_NAME (fndecl);
  if (!declname)
    return false;
  const char *name = IDENTIFIER_POINTER (declname);
  return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
}

/* Helper function for scan_omp.

   Callback for walk_gimple_stmt used to scan for OMP directives in
   the current statement in GSI.  Checks nesting restrictions first;
   an invalid construct is replaced by a GIMPLE_NOP.  Valid OMP
   statements are dispatched to their per-construct scanners.  */

static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
                 struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  omp_context *ctx = (omp_context *) wi->info;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* Check the nesting restrictions.  */
  bool remove = false;
  if (is_gimple_omp (stmt))
    remove = !check_omp_nesting_restrictions (stmt, ctx);
  else if (is_gimple_call (stmt))
    {
      tree fndecl = gimple_call_fndecl (stmt);
      if (fndecl)
        {
          /* setjmp/longjmp are rejected inside simd regions; GOMP
             synchronization/cancellation builtins get the same nesting
             checks as the corresponding directives.  */
          if (setjmp_or_longjmp_p (fndecl)
              && ctx
              && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
              && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
            {
              remove = true;
              error_at (gimple_location (stmt),
                        "setjmp/longjmp inside simd construct");
            }
          else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
            switch (DECL_FUNCTION_CODE (fndecl))
              {
              case BUILT_IN_GOMP_BARRIER:
              case BUILT_IN_GOMP_CANCEL:
              case BUILT_IN_GOMP_CANCELLATION_POINT:
              case BUILT_IN_GOMP_TASKYIELD:
              case BUILT_IN_GOMP_TASKWAIT:
              case BUILT_IN_GOMP_TASKGROUP_START:
              case BUILT_IN_GOMP_TASKGROUP_END:
                remove = !check_omp_nesting_restrictions (stmt, ctx);
                break;
              default:
                break;
              }
        }
    }
  if (remove)
    {
      /* Invalid nesting: drop the statement entirely.  */
      stmt = gimple_build_nop ();
      gsi_replace (gsi, stmt, false);
    }

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_PARALLEL:
      taskreg_nesting_level++;
      scan_omp_parallel (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_TASK:
      taskreg_nesting_level++;
      scan_omp_task (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_FOR:
      scan_omp_for (as_a <gomp_for *> (stmt), ctx);
      break;

    case GIMPLE_OMP_SECTIONS:
      scan_omp_sections (as_a <gomp_sections *> (stmt), ctx);
      break;

    case GIMPLE_OMP_SINGLE:
      scan_omp_single (as_a <gomp_single *> (stmt), ctx);
      break;

    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_TASKGROUP:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* These constructs need a context but no dedicated scanner.  */
      ctx = new_omp_context (stmt, ctx);
      scan_omp (gimple_omp_body_ptr (stmt), ctx);
      break;

    case GIMPLE_OMP_TARGET:
      scan_omp_target (as_a <gomp_target *> (stmt), ctx);
      break;

    case GIMPLE_OMP_TEAMS:
      scan_omp_teams (as_a <gomp_teams *> (stmt), ctx);
      break;

    case GIMPLE_BIND:
      {
        tree var;

        *handled_ops_p = false;
        if (ctx)
          /* Bind-local vars map to themselves in this context.  */
          for (var = gimple_bind_vars (as_a <gbind *> (stmt));
               var ;
               var = DECL_CHAIN (var))
            insert_decl_map (&ctx->cb, var, var);
      }
      break;
    default:
      *handled_ops_p = false;
      break;
    }

  return NULL_TREE;
}

/* Scan all the statements starting at the current statement.  CTX
   contains context information about the OMP directives and
   clauses found during the scan.  */

static void
scan_omp (gimple_seq *body_p, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  saved_location = input_location;
  walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
  input_location = saved_location;
}

/* Re-gimplification and code generation routines.  */

/* Build a call to GOMP_barrier.  With a non-NULL LHS the cancellable
   barrier variant is used and its result stored into LHS.  */

static gimple
build_omp_barrier (tree lhs)
{
  tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
                                           : BUILT_IN_GOMP_BARRIER);
  gcall *g = gimple_build_call (fndecl, 0);
  if (lhs)
    gimple_call_set_lhs (g, lhs);
  return g;
}

/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (gimple stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}

/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs showed for clarity):

        iD.1562 = 0;
        #omp parallel shared(iD.1562)           -> outer parallel
          iD.1562 = iD.1562 + 1;

          #omp parallel shared (iD.1562)        -> inner parallel
             iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

        outer parallel          .omp_data_s.1.i -> iD.1562
        inner parallel          .omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in
   this case we would have:

        iD.1562 = 0;
        .omp_data_o.1.i = iD.1562;
        #omp parallel shared(iD.1562)           -> outer parallel
          .omp_data_i.1 = &.omp_data_o.1
          .omp_data_i.1->i = .omp_data_i.1->i + 1;

          .omp_data_o.2.i = iD.1562;            -> **
          #omp parallel shared(iD.1562)         -> inner parallel
            .omp_data_i.2 = &.omp_data_o.2
            .omp_data_i.2->i = .omp_data_i.2->i - 1;

            ** This is a problem.  The symbol iD.1562 cannot be referenced
               inside the body of the outer parallel region.  But since we
               are emitting this copy operation while expanding the inner
               parallel directive, we need to access the CTX structure of
               the outer parallel directive to get the correct mapping:

                  .omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain.  This is not a problem in general because nested
   parallelism happens only rarely.  */

static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  /* A nested context must resolve the decl somewhere, unless it is
     a global.  */
  gcc_assert (!ctx->is_nested || t || is_global_var (decl));

  return t ?
t : decl;
}

/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts.  */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t = NULL;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
}

/* Construct the initialization value for reduction CLAUSE: the
   identity element of the reduction operation, converted to TYPE.  */

tree
omp_reduction_init (tree clause, tree type)
{
  location_t loc = OMP_CLAUSE_LOCATION (clause);
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    /* Identity 0 for additive / or-like operations.  */
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return build_zero_cst (type);

    /* Identity 1 for multiplicative / and-like operations.  */
    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert_loc (loc, type, integer_one_node);

    /* All-ones is the identity of bitwise AND.  */
    case BIT_AND_EXPR:
      return fold_convert_loc (loc, type, integer_minus_one_node);

    case MAX_EXPR:
      /* Identity of max is the smallest representable value
         (-inf when infinities are honored).  */
      if (SCALAR_FLOAT_TYPE_P (type))
        {
          REAL_VALUE_TYPE max, min;
          if (HONOR_INFINITIES (type))
            {
              real_inf (&max);
              real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
            }
          else
            real_maxval (&min, 1, TYPE_MODE (type));
          return build_real (type, min);
        }
      else
        {
          gcc_assert (INTEGRAL_TYPE_P (type));
          return TYPE_MIN_VALUE (type);
        }

    case MIN_EXPR:
      /* Identity of min is the largest representable value
         (+inf when infinities are honored).  */
      if (SCALAR_FLOAT_TYPE_P (type))
        {
          REAL_VALUE_TYPE max;
          if (HONOR_INFINITIES (type))
            real_inf (&max);
          else
            real_maxval (&max, 0, TYPE_MODE (type));
          return build_real (type, max);
        }
      else
        {
          gcc_assert (INTEGRAL_TYPE_P (type));
          return TYPE_MAX_VALUE (type);
        }

    default:
      gcc_unreachable ();
    }
}

/* Return alignment to be assumed for var in CLAUSE, which should be
   OMP_CLAUSE_ALIGNED.  */

static tree
omp_clause_aligned_alignment (tree clause)
{
  if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
    return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);

  /* Otherwise return implementation defined alignment:
     the largest alignment of the target's preferred SIMD vector type
     over integer and float scalar modes.  */
  unsigned int al = 1;
  machine_mode mode, vmode;
  int vs = targetm.vectorize.autovectorize_vector_sizes ();
  if (vs)
    vs = 1 << floor_log2 (vs);
  static enum mode_class classes[]
    = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
  /* Pairs: scalar class at [i], corresponding vector class at [i+1].  */
  for (int i = 0; i < 4; i += 2)
    for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
         mode != VOIDmode;
         mode = GET_MODE_WIDER_MODE (mode))
      {
        vmode = targetm.vectorize.preferred_simd_mode (mode);
        if (GET_MODE_CLASS (vmode) != classes[i + 1])
          continue;
        while (vs
               && GET_MODE_SIZE (vmode) < vs
               && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
          vmode = GET_MODE_2XWIDER_MODE (vmode);

        tree type = lang_hooks.types.type_for_mode (mode, 1);
        if (type == NULL_TREE || TYPE_MODE (type) != mode)
          continue;
        type = build_vector_type (type, GET_MODE_SIZE (vmode)
                                        / GET_MODE_SIZE (mode));
        if (TYPE_MODE (type) != vmode)
          continue;
        if (TYPE_ALIGN_UNIT (type) > al)
          al = TYPE_ALIGN_UNIT (type);
      }
  return build_int_cst (integer_type_node, al);
}

/* Return maximum possible vectorization factor for the target.
   Returns 1 whenever loop vectorization is effectively disabled.  */

static int
omp_max_vf (void)
{
  if (!optimize
      || optimize_debug
      || !flag_tree_loop_optimize
      || (!flag_tree_loop_vectorize
          && (global_options_set.x_flag_tree_loop_vectorize
              || global_options_set.x_flag_tree_vectorize)))
    return 1;

  int vs = targetm.vectorize.autovectorize_vector_sizes ();
  if (vs)
    {
      vs = 1 << floor_log2 (vs);
      return vs;
    }
  machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
  if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
    return GET_MODE_NUNITS (vqimode);
  return 1;
}

/* Helper function of lower_rec_input_clauses, used for #pragma omp simd
   privatization: set up a per-SIMD-lane array for NEW_VAR and return
   true, or return false when MAX_VF resolves to 1 (no SIMD arrays).
   On the first call MAX_VF is computed (clamped by any constant safelen
   clause) and the shared IDX/LANE index variables are created; IVAR and
   LVAR receive ARRAY_REFs into the per-lane array.  */

static bool
lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
                              tree &idx, tree &lane, tree &ivar, tree &lvar)
{
  if (max_vf == 0)
    {
      max_vf = omp_max_vf ();
      if (max_vf > 1)
        {
          tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
                                    OMP_CLAUSE_SAFELEN);
          /* Non-constant safelen disables SIMD arrays entirely; a
             smaller constant safelen caps the array length.  */
          if (c && TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) != INTEGER_CST)
            max_vf = 1;
          else if (c && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
                                          max_vf) == -1)
            max_vf = tree_to_shwi (OMP_CLAUSE_SAFELEN_EXPR (c));
        }
      if (max_vf > 1)
        {
          idx = create_tmp_var (unsigned_type_node);
          lane = create_tmp_var (unsigned_type_node);
        }
    }
  if (max_vf == 1)
    return false;

  tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
  tree avar = create_tmp_var_raw (atype);
  if (TREE_ADDRESSABLE (new_var))
    TREE_ADDRESSABLE (avar) = 1;
  /* Tag the array so later passes can recognize it.  */
  DECL_ATTRIBUTES (avar)
    = tree_cons (get_identifier ("omp simd array"), NULL,
                 DECL_ATTRIBUTES (avar));
  gimple_add_tmp_var (avar);
  ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
                 NULL_TREE, NULL_TREE);
  lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
                 NULL_TREE, NULL_TREE);
  if (DECL_P (new_var))
    {
      SET_DECL_VALUE_EXPR (new_var, lvar);
      DECL_HAS_VALUE_EXPR_P (new_var) = 1;
    }
  return true;
}

/* Helper function of lower_rec_input_clauses.  For a reference in simd
   reduction, add an underlying variable it will reference.  Only done
   for constant-size pointees.  */

static void
handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
{
  tree z = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_vard)));
  if (TREE_CONSTANT (z))
    {
      const char *name = NULL;
      if (DECL_NAME (new_vard))
        name = IDENTIFIER_POINTER (DECL_NAME (new_vard));

      z = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_vard)), name);
      gimple_add_tmp_var (z);
      TREE_ADDRESSABLE (z) = 1;
      z = build_fold_addr_expr_loc (loc, z);
      gimplify_assign (new_vard, z, ilist);
    }
}

/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for
   REFERENCE_TYPE private variables.
   Initialization statements go in ILIST, while calls to destructors
   go in DLIST.  */

static void
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
                         omp_context *ctx, struct omp_for_data *fd)
{
  tree c, dtor, copyin_seq, x, ptr;
  bool copyin_by_ref = false;
  bool lastprivate_firstprivate = false;
  bool reduction_omp_orig_ref = false;
  int pass;
  bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
                  && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD);
  int max_vf = 0;
  tree lane = NULL_TREE, idx = NULL_TREE;
  tree ivar = NULL_TREE, lvar = NULL_TREE;
  /* llist[0] collects per-SIMD-lane initialization, llist[1] the
     matching per-lane cleanup/merge sequences.  */
  gimple_seq llist[2] = { NULL, NULL };

  copyin_seq = NULL;

  /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
     with data sharing clauses referencing variable sized vars.  That
     is unnecessarily hard to support and very unlikely to result in
     vectorized code anyway.  */
  if (is_simd)
    for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_LINEAR:
          if (OMP_CLAUSE_LINEAR_ARRAY (c))
            max_vf = 1;
          /* FALLTHRU */
        case OMP_CLAUSE_REDUCTION:
        case OMP_CLAUSE_PRIVATE:
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_LASTPRIVATE:
          if (is_variable_sized (OMP_CLAUSE_DECL (c)))
            max_vf = 1;
          break;
        default:
          continue;
        }

  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass.  This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations.  */
  for (pass = 0; pass < 2; ++pass)
    {
      for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
        {
          enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
          tree var, new_var;
          bool by_ref;
          location_t clause_loc = OMP_CLAUSE_LOCATION (c);

          /* First switch: filter out clauses not handled (in this pass)
             and handle ALIGNED completely.  */
          switch (c_kind)
            {
            case OMP_CLAUSE_PRIVATE:
              if (OMP_CLAUSE_PRIVATE_DEBUG (c))
                continue;
              break;
            case OMP_CLAUSE_SHARED:
              /* Ignore shared directives in teams construct.  */
              if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
                continue;
              if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
                {
                  gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
                  continue;
                }
              /* FALLTHRU */
            case OMP_CLAUSE_FIRSTPRIVATE:
            case OMP_CLAUSE_COPYIN:
            case OMP_CLAUSE_LINEAR:
              break;
            case OMP_CLAUSE_REDUCTION:
              if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
                reduction_omp_orig_ref = true;
              break;
            case OMP_CLAUSE__LOOPTEMP_:
              /* Handle _looptemp_ clauses only on parallel.  */
              if (fd)
                continue;
              break;
            case OMP_CLAUSE_LASTPRIVATE:
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
                {
                  lastprivate_firstprivate = true;
                  if (pass != 0)
                    continue;
                }
              /* Even without corresponding firstprivate, if
                 decl is Fortran allocatable, it needs outer var
                 reference.  */
              else if (pass == 0
                       && lang_hooks.decls.omp_private_outer_ref
                                                        (OMP_CLAUSE_DECL (c)))
                lastprivate_firstprivate = true;
              break;
            case OMP_CLAUSE_ALIGNED:
              if (pass == 0)
                continue;
              var = OMP_CLAUSE_DECL (c);
              if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
                  && !is_global_var (var))
                {
                  /* Local pointer: wrap it in __builtin_assume_aligned.  */
                  new_var = maybe_lookup_decl (var, ctx);
                  if (new_var == NULL_TREE)
                    new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
                  x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
                  x = build_call_expr_loc (clause_loc, x, 2, new_var,
                                           omp_clause_aligned_alignment (c));
                  x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
                  x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
                  gimplify_and_add (x, ilist);
                }
              else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
                       && is_global_var (var))
                {
                  /* Global array: access it through an aligned pointer
                     installed as the DECL_VALUE_EXPR.  */
                  tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
                  new_var = lookup_decl (var, ctx);
                  t = maybe_lookup_decl_in_outer_ctx (var, ctx);
                  t = build_fold_addr_expr_loc (clause_loc, t);
                  t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
                  t = build_call_expr_loc (clause_loc, t2, 2, t,
                                           omp_clause_aligned_alignment (c));
                  t = fold_convert_loc (clause_loc, ptype, t);
                  x = create_tmp_var (ptype);
                  t = build2 (MODIFY_EXPR, ptype, x, t);
                  gimplify_and_add (t, ilist);
                  t = build_simple_mem_ref_loc (clause_loc, x);
                  SET_DECL_VALUE_EXPR (new_var, t);
                  DECL_HAS_VALUE_EXPR_P (new_var) = 1;
                }
              continue;
            default:
              continue;
            }

          new_var = var = OMP_CLAUSE_DECL (c);
          if (c_kind != OMP_CLAUSE_COPYIN)
            new_var = lookup_decl (var, ctx);

          if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
            {
              if (pass != 0)
                continue;
            }
          else if (is_variable_sized (var))
            {
              /* For variable sized types, we need to allocate the
                 actual storage here.  Call alloca and store the
                 result in the pointer decl that we created elsewhere.  */
              if (pass == 0)
                continue;
              if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
                {
                  gcall *stmt;
                  tree tmp, atmp;

                  ptr = DECL_VALUE_EXPR (new_var);
                  gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
                  ptr = TREE_OPERAND (ptr, 0);
                  gcc_assert (DECL_P (ptr));
                  x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));

                  /* void *tmp = __builtin_alloca */
                  atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
                  stmt = gimple_build_call (atmp, 1, x);
                  tmp = create_tmp_var_raw (ptr_type_node);
                  gimple_add_tmp_var (tmp);
                  gimple_call_set_lhs (stmt, tmp);

                  gimple_seq_add_stmt (ilist, stmt);

                  x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
                  gimplify_assign (ptr, x, ilist);
                }
            }
          else if (is_reference (var))
            {
              /* For references that are being privatized for Fortran,
                 allocate new backing storage for the new pointer
                 variable.  This allows us to avoid changing all the
                 code that expects a pointer to something that expects
                 a direct variable.  */
              if (pass == 0)
                continue;

              x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
              if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
                {
                  x = build_receiver_ref (var, false, ctx);
                  x = build_fold_addr_expr_loc (clause_loc, x);
                }
              else if (TREE_CONSTANT (x))
                {
                  /* For reduction in SIMD loop, defer adding the
                     initialization of the reference, because if we decide
                     to use SIMD array for it, the initialization could
                     cause expansion ICE.  */
                  if (c_kind == OMP_CLAUSE_REDUCTION && is_simd)
                    x = NULL_TREE;
                  else
                    {
                      const char *name = NULL;
                      if (DECL_NAME (var))
                        name = IDENTIFIER_POINTER (DECL_NAME (new_var));

                      x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
                                              name);
                      gimple_add_tmp_var (x);
                      TREE_ADDRESSABLE (x) = 1;
                      x = build_fold_addr_expr_loc (clause_loc, x);
                    }
                }
              else
                {
                  /* Non-constant size: fall back to alloca.  */
                  tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
                  x = build_call_expr_loc (clause_loc, atmp, 1, x);
                }

              if (x)
                {
                  x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
                  gimplify_assign (new_var, x, ilist);
                }

              new_var = build_simple_mem_ref_loc (clause_loc, new_var);
            }
          else if (c_kind == OMP_CLAUSE_REDUCTION
                   && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
            {
              if (pass == 0)
                continue;
            }
          else if (pass != 0)
            continue;

          /* Second switch: emit the actual privatization/copy code for
             the clause.  */
          switch (OMP_CLAUSE_CODE (c))
            {
            case OMP_CLAUSE_SHARED:
              /* Ignore shared directives in teams construct.  */
              if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
                continue;
              /* Shared global vars are just accessed directly.  */
              if (is_global_var (new_var))
                break;
              /* Set up the DECL_VALUE_EXPR for shared variables now.  This
                 needs to be delayed until after fixup_child_record_type so
                 that we get the correct type during the dereference.  */
              by_ref = use_pointer_for_field (var, ctx);
              x = build_receiver_ref (var, by_ref, ctx);
              SET_DECL_VALUE_EXPR (new_var, x);
              DECL_HAS_VALUE_EXPR_P (new_var) = 1;

              /* ??? If VAR is not passed by reference, and the variable
                 hasn't been initialized yet, then we'll get a warning for
                 the store into the omp_data_s structure.  Ideally, we'd be
                 able to notice this and not store anything at all, but
                 we're generating code too early.  Suppress the warning.  */
              if (!by_ref)
                TREE_NO_WARNING (var) = 1;
              break;

            case OMP_CLAUSE_LASTPRIVATE:
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
                break;
              /* FALLTHRU */

            case OMP_CLAUSE_PRIVATE:
              if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
                x = build_outer_var_ref (var, ctx);
              else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
                {
                  if (is_task_ctx (ctx))
                    x = build_receiver_ref (var, false, ctx);
                  else
                    x = build_outer_var_ref (var, ctx);
                }
              else
                x = NULL;
            do_private:
              tree nx;
              nx = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
              if (is_simd)
                {
                  tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
                  if ((TREE_ADDRESSABLE (new_var) || nx || y
                       || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
                      && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
                                                       idx, lane, ivar, lvar))
                    {
                      /* SIMD-array variant: construct/destroy each lane.  */
                      if (nx)
                        x = lang_hooks.decls.omp_clause_default_ctor
                                                (c, unshare_expr (ivar), x);
                      if (nx && x)
                        gimplify_and_add (x, &llist[0]);
                      if (y)
                        {
                          y = lang_hooks.decls.omp_clause_dtor (c, ivar);
                          if (y)
                            {
                              gimple_seq tseq = NULL;

                              dtor = y;
                              gimplify_stmt (&dtor, &tseq);
                              gimple_seq_add_seq (&llist[1], tseq);
                            }
                        }
                      break;
                    }
                }
              if (nx)
                gimplify_and_add (nx, ilist);
              /* FALLTHRU */

            do_dtor:
              x = lang_hooks.decls.omp_clause_dtor (c, new_var);
              if (x)
                {
                  gimple_seq tseq = NULL;

                  dtor = x;
                  gimplify_stmt (&dtor, &tseq);
                  gimple_seq_add_seq (dlist, tseq);
                }
              break;

            case OMP_CLAUSE_LINEAR:
              if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
                goto do_firstprivate;
              if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
                x = NULL;
              else
                x = build_outer_var_ref (var, ctx);
              goto do_private;

            case OMP_CLAUSE_FIRSTPRIVATE:
              if (is_task_ctx (ctx))
                {
                  if (is_reference (var) || is_variable_sized (var))
                    goto do_dtor;
                  else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
                                                                          ctx))
                           || use_pointer_for_field (var, NULL))
                    {
                      x = build_receiver_ref (var, false, ctx);
                      SET_DECL_VALUE_EXPR (new_var, x);
                      DECL_HAS_VALUE_EXPR_P (new_var) = 1;
                      goto do_dtor;
                    }
                }
            do_firstprivate:
              x = build_outer_var_ref (var, ctx);
              if (is_simd)
                {
                  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
                      && gimple_omp_for_combined_into_p (ctx->stmt))
                    {
                      /* Combined distribute/for: recompute the linear
                         variable's start from the _looptemp_ clause by
                         counting iterations from the loop start N1.  */
                      tree t = OMP_CLAUSE_LINEAR_STEP (c);
                      tree stept = TREE_TYPE (t);
                      tree ct = find_omp_clause (clauses,
                                                 OMP_CLAUSE__LOOPTEMP_);
                      gcc_assert (ct);
                      tree l = OMP_CLAUSE_DECL (ct);
                      tree n1 = fd->loop.n1;
                      tree step = fd->loop.step;
                      tree itype = TREE_TYPE (l);
                      if (POINTER_TYPE_P (itype))
                        itype = signed_type_for (itype);
                      l = fold_build2 (MINUS_EXPR, itype, l, n1);
                      if (TYPE_UNSIGNED (itype)
                          && fd->loop.cond_code == GT_EXPR)
                        l = fold_build2 (TRUNC_DIV_EXPR, itype,
                                         fold_build1 (NEGATE_EXPR, itype, l),
                                         fold_build1 (NEGATE_EXPR,
                                                      itype, step));
                      else
                        l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
                      t = fold_build2 (MULT_EXPR, stept,
                                       fold_convert (stept, l), t);

                      if (OMP_CLAUSE_LINEAR_ARRAY (c))
                        {
                          x = lang_hooks.decls.omp_clause_linear_ctor
                                                        (c, new_var, x, t);
                          gimplify_and_add (x, ilist);
                          goto do_dtor;
                        }

                      if (POINTER_TYPE_P (TREE_TYPE (x)))
                        x = fold_build2 (POINTER_PLUS_EXPR,
                                         TREE_TYPE (x), x, t);
                      else
                        x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
                    }

                  if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
                       || TREE_ADDRESSABLE (new_var))
                      && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
                                                       idx, lane, ivar, lvar))
                    {
                      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
                        {
                          /* Addressable linear var: keep a scalar IV and
                             store/advance it at the top of the body.  */
                          tree iv = create_tmp_var (TREE_TYPE (new_var));
                          x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
                          gimplify_and_add (x, ilist);
                          gimple_stmt_iterator gsi
                            = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
                          gassign *g
                            = gimple_build_assign (unshare_expr (lvar), iv);
                          gsi_insert_before_without_update (&gsi, g,
                                                            GSI_SAME_STMT);
                          tree t = OMP_CLAUSE_LINEAR_STEP (c);
                          enum tree_code code = PLUS_EXPR;
                          if (POINTER_TYPE_P (TREE_TYPE (new_var)))
                            code = POINTER_PLUS_EXPR;
                          g = gimple_build_assign (iv, code, iv, t);
                          gsi_insert_before_without_update (&gsi, g,
                                                            GSI_SAME_STMT);
                          break;
                        }
                      x = lang_hooks.decls.omp_clause_copy_ctor
                                                (c, unshare_expr (ivar), x);
                      gimplify_and_add (x, &llist[0]);
                      x = lang_hooks.decls.omp_clause_dtor (c, ivar);
                      if (x)
                        {
                          gimple_seq tseq = NULL;

                          dtor = x;
                          gimplify_stmt (&dtor, &tseq);
                          gimple_seq_add_seq (&llist[1], tseq);
                        }
                      break;
                    }
                }
              x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
              gimplify_and_add (x, ilist);
              goto do_dtor;

            case OMP_CLAUSE__LOOPTEMP_:
              gcc_assert (is_parallel_ctx (ctx));
              x = build_outer_var_ref (var, ctx);
              x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
              gimplify_and_add (x, ilist);
              break;

            case OMP_CLAUSE_COPYIN:
              by_ref = use_pointer_for_field (var, NULL);
              x = build_receiver_ref (var, by_ref, ctx);
              x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
              append_to_statement_list (x, &copyin_seq);
              copyin_by_ref |= by_ref;
              break;

            case OMP_CLAUSE_REDUCTION:
              if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
                {
                  /* User-defined reduction: splice in the GIMPLE init and
                     merge sequences recorded on the clause.  */
                  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
                  gimple tseq;
                  x = build_outer_var_ref (var, ctx);

                  if (is_reference (var)
                      && !useless_type_conversion_p (TREE_TYPE (placeholder),
                                                     TREE_TYPE (x)))
                    x = build_fold_addr_expr_loc (clause_loc, x);
                  SET_DECL_VALUE_EXPR (placeholder, x);
                  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
                  tree new_vard = new_var;
                  if (is_reference (var))
                    {
                      gcc_assert (TREE_CODE (new_var) == MEM_REF);
                      new_vard = TREE_OPERAND (new_var, 0);
                      gcc_assert (DECL_P (new_vard));
                    }
                  if (is_simd
                      && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
                                                       idx, lane, ivar, lvar))
                    {
                      if (new_vard == new_var)
                        {
                          gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
                          SET_DECL_VALUE_EXPR (new_var, ivar);
                        }
                      else
                        {
                          SET_DECL_VALUE_EXPR (new_vard,
                                               build_fold_addr_expr (ivar));
                          DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
                        }
                      x = lang_hooks.decls.omp_clause_default_ctor
                                (c, unshare_expr (ivar),
                                 build_outer_var_ref (var, ctx));
                      if (x)
                        gimplify_and_add (x, &llist[0]);
                      if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
                        {
                          tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
                          lower_omp (&tseq, ctx);
                          gimple_seq_add_seq (&llist[0], tseq);
                        }
                      OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
                      tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
                      lower_omp (&tseq, ctx);
                      gimple_seq_add_seq (&llist[1], tseq);
                      OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
                      DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
                      if (new_vard == new_var)
                        SET_DECL_VALUE_EXPR (new_var, lvar);
                      else
                        SET_DECL_VALUE_EXPR (new_vard,
                                             build_fold_addr_expr (lvar));
                      x = lang_hooks.decls.omp_clause_dtor (c, ivar);
                      if (x)
                        {
                          tseq = NULL;
                          dtor = x;
                          gimplify_stmt (&dtor, &tseq);
                          gimple_seq_add_seq (&llist[1], tseq);
                        }
                      break;
                    }
                  /* If this is a reference to constant size reduction var
                     with placeholder, we haven't emitted the initializer
                     for it because it is undesirable if SIMD arrays are used.
                     But if they aren't used, we need to emit the deferred
                     initialization now.  */
                  else if (is_reference (var) && is_simd)
                    handle_simd_reference (clause_loc, new_vard, ilist);
                  x = lang_hooks.decls.omp_clause_default_ctor
                                (c, unshare_expr (new_var),
                                 build_outer_var_ref (var, ctx));
                  if (x)
                    gimplify_and_add (x, ilist);
                  if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
                    {
                      tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
                      lower_omp (&tseq, ctx);
                      gimple_seq_add_seq (ilist, tseq);
                    }
                  OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
                  if (is_simd)
                    {
                      tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
                      lower_omp (&tseq, ctx);
                      gimple_seq_add_seq (dlist, tseq);
                      OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
                    }
                  DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
                  goto do_dtor;
                }
              else
                {
                  /* Built-in reduction operator: initialize the private
                     copy with the operator's identity and merge back with
                     CODE.  */
                  x = omp_reduction_init (c, TREE_TYPE (new_var));
                  gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
                  enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);

                  /* reduction(-:var) sums up the partial results, so it
                     acts identically to reduction(+:var).  */
                  if (code == MINUS_EXPR)
                    code = PLUS_EXPR;

                  tree new_vard = new_var;
                  if (is_simd && is_reference (var))
                    {
                      gcc_assert (TREE_CODE (new_var) == MEM_REF);
                      new_vard = TREE_OPERAND (new_var, 0);
                      gcc_assert (DECL_P (new_vard));
                    }
                  if (is_simd
                      && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
                                                       idx, lane, ivar, lvar))
                    {
                      tree ref = build_outer_var_ref (var, ctx);

                      gimplify_assign (unshare_expr (ivar), x, &llist[0]);

                      x = build2 (code, TREE_TYPE (ref), ref, ivar);
                      ref = build_outer_var_ref (var, ctx);
                      gimplify_assign (ref, x, &llist[1]);

                      if (new_vard != new_var)
                        {
                          SET_DECL_VALUE_EXPR (new_vard,
                                               build_fold_addr_expr (lvar));
                          DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
                        }
                    }
                  else
                    {
                      if (is_reference (var) && is_simd)
                        handle_simd_reference (clause_loc, new_vard, ilist);
                      gimplify_assign (new_var, x, ilist);
                      if (is_simd)
                        {
                          tree ref = build_outer_var_ref (var, ctx);

                          x = build2 (code, TREE_TYPE (ref), ref, new_var);
                          ref = build_outer_var_ref (var, ctx);
                          gimplify_assign (ref, x, dlist);
                        }
                    }
                }
              break;

            default:
              gcc_unreachable ();
            }
        }
    }

  if (lane)
    {
      tree uid = create_tmp_var (ptr_type_node, "simduid");
      /* Don't want uninit warnings on simduid, it is always uninitialized,
         but we use it not for the value, but for the DECL_UID only.
*/ TREE_NO_WARNING (uid) = 1; gimple g = gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid); gimple_call_set_lhs (g, lane); gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt)); gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT); c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_); OMP_CLAUSE__SIMDUID__DECL (c) = uid; OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt); gimple_omp_for_set_clauses (ctx->stmt, c); g = gimple_build_assign (lane, INTEGER_CST, build_int_cst (unsigned_type_node, 0)); gimple_seq_add_stmt (ilist, g); for (int i = 0; i < 2; i++) if (llist[i]) { tree vf = create_tmp_var (unsigned_type_node); g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid); gimple_call_set_lhs (g, vf); gimple_seq *seq = i == 0 ? ilist : dlist; gimple_seq_add_stmt (seq, g); tree t = build_int_cst (unsigned_type_node, 0); g = gimple_build_assign (idx, INTEGER_CST, t); gimple_seq_add_stmt (seq, g); tree body = create_artificial_label (UNKNOWN_LOCATION); tree header = create_artificial_label (UNKNOWN_LOCATION); tree end = create_artificial_label (UNKNOWN_LOCATION); gimple_seq_add_stmt (seq, gimple_build_goto (header)); gimple_seq_add_stmt (seq, gimple_build_label (body)); gimple_seq_add_seq (seq, llist[i]); t = build_int_cst (unsigned_type_node, 1); g = gimple_build_assign (idx, PLUS_EXPR, idx, t); gimple_seq_add_stmt (seq, g); gimple_seq_add_stmt (seq, gimple_build_label (header)); g = gimple_build_cond (LT_EXPR, idx, vf, body, end); gimple_seq_add_stmt (seq, g); gimple_seq_add_stmt (seq, gimple_build_label (end)); } } /* The copyin sequence is not to be executed by the main thread, since that would result in self-copies. Perhaps not visible to scalars, but it certainly is to C++ operator=. 
*/ if (copyin_seq) { x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0); x = build2 (NE_EXPR, boolean_type_node, x, build_int_cst (TREE_TYPE (x), 0)); x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL); gimplify_and_add (x, ilist); } /* If any copyin variable is passed by reference, we must ensure the master thread doesn't modify it before it is copied over in all threads. Similarly for variables in both firstprivate and lastprivate clauses we need to ensure the lastprivate copying happens after firstprivate copying in all threads. And similarly for UDRs if initializer expression refers to omp_orig. */ if (copyin_by_ref || lastprivate_firstprivate || reduction_omp_orig_ref) { /* Don't add any barrier for #pragma omp simd or #pragma omp distribute. */ if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR) gimple_seq_add_stmt (ilist, build_omp_barrier (NULL_TREE)); } /* If max_vf is non-zero, then we can use only a vectorization factor up to the max_vf we chose. So stick it into the safelen clause. */ if (max_vf) { tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt), OMP_CLAUSE_SAFELEN); if (c == NULL_TREE || (TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) == INTEGER_CST && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c), max_vf) == 1)) { c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN); OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node, max_vf); OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt); gimple_omp_for_set_clauses (ctx->stmt, c); } } } /* Generate code to implement the LASTPRIVATE clauses. This is used for both parallel and workshare constructs. PREDICATE may be NULL if it's always true. 
*/

static void
lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
			   omp_context *ctx)
{
  tree x, c, label = NULL, orig_clauses = clauses;
  bool par_clauses = false;
  tree simduid = NULL, lastlane = NULL;

  /* Early exit if there are no lastprivate or linear clauses.  */
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
	|| (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
	    && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
      break;
  if (clauses == NULL)
    {
      /* If this was a workshare clause, see if it had been combined
	 with its parallel.  In that case, look for the clauses on the
	 parallel statement itself.  */
      if (is_parallel_ctx (ctx))
	return;

      ctx = ctx->outer;
      if (ctx == NULL || !is_parallel_ctx (ctx))
	return;

      clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
				 OMP_CLAUSE_LASTPRIVATE);
      if (clauses == NULL)
	return;
      par_clauses = true;
    }

  /* If a PREDICATE was given, guard the copy-out with
     "if (PREDICATE) goto label_true; else goto label;" so only the
     thread that executed the sequentially last iteration stores back.  */
  if (predicate)
    {
      gcond *stmt;
      tree label_true, arm1, arm2;

      label = create_artificial_label (UNKNOWN_LOCATION);
      label_true = create_artificial_label (UNKNOWN_LOCATION);
      arm1 = TREE_OPERAND (predicate, 0);
      arm2 = TREE_OPERAND (predicate, 1);
      gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
      gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
      stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
				label_true, label);
      gimple_seq_add_stmt (stmt_list, stmt);
      gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
    }

  /* For SIMD loops remember the _simduid_ clause; it is needed below to
     find the last lane of the per-lane "omp simd array" copies.  */
  if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
      && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
    {
      simduid = find_omp_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
      if (simduid)
	simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
    }

  for (c = clauses; c ;)
    {
      tree var, new_var;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	  || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	      && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
	{
	  var = OMP_CLAUSE_DECL (c);
	  new_var = lookup_decl (var, ctx);

	  /* If the privatized copy lives in an "omp simd array", read the
	     element written by the last SIMD lane
	     (IFN_GOMP_SIMD_LAST_LANE) instead of the scalar.  */
	  if (simduid && DECL_HAS_VALUE_EXPR_P (new_var))
	    {
	      tree val = DECL_VALUE_EXPR (new_var);
	      if (TREE_CODE (val) == ARRAY_REF
		  && VAR_P (TREE_OPERAND (val, 0))
		  && lookup_attribute ("omp simd array",
				       DECL_ATTRIBUTES (TREE_OPERAND (val,
								      0))))
		{
		  if (lastlane == NULL)
		    {
		      lastlane = create_tmp_var (unsigned_type_node);
		      gcall *g
			= gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
						      2, simduid,
						      TREE_OPERAND (val, 1));
		      gimple_call_set_lhs (g, lastlane);
		      gimple_seq_add_stmt (stmt_list, g);
		    }
		  new_var = build4 (ARRAY_REF, TREE_TYPE (val),
				    TREE_OPERAND (val, 0), lastlane,
				    NULL_TREE, NULL_TREE);
		}
	    }

	  /* Lower any deferred statement sequence attached to the clause
	     and emit it ahead of the copy-out assignment.  */
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	      && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    {
	      lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
	      gimple_seq_add_seq (stmt_list,
				  OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
	      OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
	    }
	  else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
		   && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	    {
	      lower_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
	      gimple_seq_add_seq (stmt_list,
				  OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c));
	      OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) = NULL;
	    }

	  /* Store the private copy back into the outer (original)
	     variable.  */
	  x = build_outer_var_ref (var, ctx);
	  if (is_reference (var))
	    new_var = build_simple_mem_ref_loc (clause_loc, new_var);
	  x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
	  gimplify_and_add (x, stmt_list);
	}
      c = OMP_CLAUSE_CHAIN (c);
      if (c == NULL && !par_clauses)
	{
	  /* If this was a workshare clause, see if it had been combined
	     with its parallel.  In that case, continue looking for the
	     clauses also on the parallel statement itself.  */
	  if (is_parallel_ctx (ctx))
	    break;

	  ctx = ctx->outer;
	  if (ctx == NULL || !is_parallel_ctx (ctx))
	    break;

	  c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
			       OMP_CLAUSE_LASTPRIVATE);
	  par_clauses = true;
	}
    }

  if (label)
    gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
}

/* Helper for OpenACC reduction lowering: store the thread-local partial
   reduction value NEW_VAR of reduction variable VAR into element TID of
   the per-reduction staging array, appending the generated statements
   to *STMT_SEQP.  */

static void
oacc_lower_reduction_var_helper (gimple_seq *stmt_seqp, omp_context *ctx,
				 tree tid, tree var, tree new_var)
{
  /* The atomic add at the end of the sum creates unnecessary
     write contention on accelerators.  To work around this,
     create an array to store the partial reductions.  Later, in
     lower_omp_for (for openacc), the values of array will be
     combined.  */

  tree t = NULL_TREE, array, x;
  tree type = get_base_type (var);
  gimple stmt;

  /* Now insert the partial reductions into the array.  */

  /* Find the reduction array.  */

  tree ptype = build_pointer_type (type);

  t = lookup_oacc_reduction (oacc_get_reduction_array_id (var), ctx);
  t = build_receiver_ref (t, false, ctx->outer);

  array = create_tmp_var (ptype);
  gimplify_assign (array, t, stmt_seqp);

  tree ptr = create_tmp_var (TREE_TYPE (array));

  /* Compute the byte offset of this thread's slot:
     offset = sizeof (TYPE) * tid.  */

  /* testing a unary conversion.  */
  tree offset = create_tmp_var (sizetype);
  gimplify_assign (offset, TYPE_SIZE_UNIT (type), stmt_seqp);
  t = create_tmp_var (sizetype);
  gimplify_assign (t, unshare_expr (fold_build1 (NOP_EXPR, sizetype, tid)),
		   stmt_seqp);
  stmt = gimple_build_assign (offset, MULT_EXPR, offset, t);
  gimple_seq_add_stmt (stmt_seqp, stmt);

  /* Offset expression.  Does the POINTER_PLUS_EXPR take care
     of adding sizeof(var) to the array?  */
  ptr = create_tmp_var (ptype);
  stmt = gimple_build_assign (unshare_expr (ptr), POINTER_PLUS_EXPR, array,
			      offset);
  gimple_seq_add_stmt (stmt_seqp, stmt);

  /* Move the local sum to gfc$sum[i].  */
  x = unshare_expr (build_simple_mem_ref (ptr));
  stmt = gimplify_assign (x, new_var, stmt_seqp);
}

/* Generate code to implement the REDUCTION clauses.
*/

/* CLAUSES is the clause chain of the construct described by CTX; the
   generated merge code is appended to *STMT_SEQP.  */

static void
lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
{
  gimple_seq sub_seq = NULL;
  gimple stmt;
  tree x, c, tid = NULL_TREE;
  int count = 0;

  /* SIMD reductions are handled in lower_rec_input_clauses.  */
  if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
      && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
    return;

  /* First see if there is exactly one reduction clause.  Use OMP_ATOMIC
     update in that case, otherwise use a lock.  */
  for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
      {
	if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	  {
	    /* Never use OMP_ATOMIC for array reductions or UDRs.  */
	    count = -1;
	    break;
	  }
	count++;
      }

  if (count == 0)
    return;

  /* Initialize thread info for OpenACC.  */
  if (is_gimple_omp_oacc (ctx->stmt))
    {
      /* Get the current thread id.  */
      tree call = builtin_decl_explicit (BUILT_IN_GOACC_GET_THREAD_NUM);
      tid = create_tmp_var (TREE_TYPE (TREE_TYPE (call)));
      gimple stmt = gimple_build_call (call, 0);
      gimple_call_set_lhs (stmt, tid);
      gimple_seq_add_stmt (stmt_seqp, stmt);
    }

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, new_var;
      enum tree_code code;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
	continue;

      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);
      if (is_reference (var))
	new_var = build_simple_mem_ref_loc (clause_loc, new_var);
      ref = build_outer_var_ref (var, ctx);
      code = OMP_CLAUSE_REDUCTION_CODE (c);

      /* reduction(-:var) sums up the partial results, so it acts
	 identically to reduction(+:var).  */
      if (code == MINUS_EXPR)
	code = PLUS_EXPR;

      if (is_gimple_omp_oacc (ctx->stmt))
	{
	  /* OpenACC stores partials into a staging array instead of
	     using atomics; see oacc_lower_reduction_var_helper.  */
	  gcc_checking_assert (!OMP_CLAUSE_REDUCTION_PLACEHOLDER (c));

	  oacc_lower_reduction_var_helper (stmt_seqp, ctx, tid, var, new_var);
	}
      else if (count == 1)
	{
	  /* Single reduction: emit one OMP_ATOMIC update and return —
	     no GOMP_atomic lock region is needed.  */
	  tree addr = build_fold_addr_expr_loc (clause_loc, ref);

	  addr = save_expr (addr);
	  ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
	  x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
	  x = build2 (OMP_ATOMIC, void_type_node, addr, x);
	  gimplify_and_add (x, stmt_seqp);
	  return;
	}
      else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  /* User-defined reduction: splice in the pre-built merge
	     sequence, with the placeholder standing for the outer ref.  */
	  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);

	  if (is_reference (var)
	      && !useless_type_conversion_p (TREE_TYPE (placeholder),
					     TREE_TYPE (ref)))
	    ref = build_fold_addr_expr_loc (clause_loc, ref);
	  SET_DECL_VALUE_EXPR (placeholder, ref);
	  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
	  lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	  gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
	  OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
	  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
	}
      else
	{
	  /* Plain reduction: outer = outer CODE private.  */
	  x = build2 (code, TREE_TYPE (ref), ref, new_var);
	  ref = build_outer_var_ref (var, ctx);
	  gimplify_assign (ref, x, &sub_seq);
	}
    }

  if (is_gimple_omp_oacc (ctx->stmt))
    return;

  /* Multiple reductions: protect all merges with one
     GOMP_atomic_start/GOMP_atomic_end lock region.  */
  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
			    0);
  gimple_seq_add_stmt (stmt_seqp, stmt);

  gimple_seq_add_seq (stmt_seqp, sub_seq);

  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
			    0);
  gimple_seq_add_stmt (stmt_seqp, stmt);
}

/* Generate code to implement the COPYPRIVATE clauses.
*/

/* The broadcasting thread's store goes into *SLIST (sender side); the
   receiving threads' loads go into *RLIST (receiver side).  */

static void
lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
			   omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, new_var, ref, x;
      bool by_ref;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
	continue;

      var = OMP_CLAUSE_DECL (c);
      by_ref = use_pointer_for_field (var, NULL);

      /* Sender side: store the value (or its address, if passed by
	 reference) into the communication record field.  */
      ref = build_sender_ref (var, ctx);
      x = new_var = lookup_decl_in_outer_ctx (var, ctx);
      if (by_ref)
	{
	  x = build_fold_addr_expr_loc (clause_loc, new_var);
	  x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
	}
      gimplify_assign (ref, x, slist);

      /* Receiver side: read it back out of the record, dereferencing
	 as needed, and assign into the local copy.  */
      ref = build_receiver_ref (var, false, ctx);
      if (by_ref)
	{
	  ref = fold_convert_loc (clause_loc,
				  build_pointer_type (TREE_TYPE (new_var)),
				  ref);
	  ref = build_fold_indirect_ref_loc (clause_loc, ref);
	}
      if (is_reference (var))
	{
	  ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
	  ref = build_simple_mem_ref_loc (clause_loc, ref);
	  new_var = build_simple_mem_ref_loc (clause_loc, new_var);
	}
      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
      gimplify_and_add (x, rlist);
    }
}

/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN,
   LASTPRIVATE, and REDUCTION from the sender (aka parent) side.
*/

/* Inbound copies (parent -> child data record) go into *ILIST; outbound
   copy-backs (data record -> parent) go into *OLIST.  */

static void
lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
		    omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree val, ref, x, var;
      bool by_ref, do_in = false, do_out = false;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      /* First filter: only the clause kinds below need sender-side
	 code; PRIVATE only when it references the outer variable.  */
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    break;
	  continue;
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_LASTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE__LOOPTEMP_:
	  break;
	default:
	  continue;
	}

      val = OMP_CLAUSE_DECL (c);
      var = lookup_decl_in_outer_ctx (val, ctx);

      /* Globals are directly visible in the child (except COPYIN, which
	 must still be transmitted); variable-sized objects are handled
	 elsewhere.  */
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
	  && is_global_var (var))
	continue;
      if (is_variable_sized (val))
	continue;
      by_ref = use_pointer_for_field (val, NULL);

      /* Second pass: decide the copy direction(s) per clause kind.  */
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE__LOOPTEMP_:
	  do_in = true;
	  break;

	case OMP_CLAUSE_LASTPRIVATE:
	  if (by_ref || is_reference (val))
	    {
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		continue;
	      do_in = true;
	    }
	  else
	    {
	      do_out = true;
	      if (lang_hooks.decls.omp_private_outer_ref (val))
		do_in = true;
	    }
	  break;

	case OMP_CLAUSE_REDUCTION:
	  do_in = true;
	  do_out = !(by_ref || is_reference (val));
	  break;

	default:
	  gcc_unreachable ();
	}

      if (do_in)
	{
	  ref = build_sender_ref (val, ctx);
	  x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
	  gimplify_assign (ref, x, ilist);
	  if (is_task_ctx (ctx))
	    DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
	}

      if (do_out)
	{
	  ref = build_sender_ref (val, ctx);
	  gimplify_assign (var, ref, olist);
	}
    }
}

/* Generate code to implement SHARED from the sender (aka parent)
   side.  This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
   list things that got automatically shared.  */

static void
lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
{
  tree var, ovar, nvar, f, x, record_type;

  if (ctx->record_type == NULL)
    return;

  /* Walk the fields of the communication record rather than the clause
     list, since implicitly shared variables have no clause.  */
  record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
  for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
    {
      ovar = DECL_ABSTRACT_ORIGIN (f);
      nvar = maybe_lookup_decl (ovar, ctx);
      if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
	continue;

      /* If CTX is a nested parallel directive.  Find the immediately
	 enclosing parallel or workshare construct that contains a
	 mapping for OVAR.  */
      var = lookup_decl_in_outer_ctx (ovar, ctx);

      if (use_pointer_for_field (ovar, ctx))
	{
	  /* Shared by address: send &var; nothing to copy back.  */
	  x = build_sender_ref (ovar, ctx);
	  var = build_fold_addr_expr (var);
	  gimplify_assign (x, var, ilist);
	}
      else
	{
	  /* Shared by value: copy in before, and copy back after unless
	     the variable is read-only or a by-reference result/parm.  */
	  x = build_sender_ref (ovar, ctx);
	  gimplify_assign (x, var, ilist);

	  if (!TREE_READONLY (var)
	      /* We don't need to receive a new reference to a result
	         or parm decl.  In fact we may not store to it as we will
		 invalidate any pending RSO and generate wrong gimple
		 during inlining.  */
	      && !((TREE_CODE (var) == RESULT_DECL
		    || TREE_CODE (var) == PARM_DECL)
		   && DECL_BY_REFERENCE (var)))
	    {
	      x = build_sender_ref (ovar, ctx);
	      gimplify_assign (var, x, olist);
	    }
	}
    }
}

/* A convenience function to build an empty GIMPLE_COND with just the
   condition.  */

static gcond *
gimple_build_cond_empty (tree cond)
{
  enum tree_code pred_code;
  tree lhs, rhs;

  gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
  return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
}

/* Build the function calls to GOMP_parallel_start etc to actually
   generate the parallel operation.  REGION is the parallel region
   being expanded.  BB is the block where to insert the code.  WS_ARGS
   will be set if this is a call to a combined parallel+workshare
   construct, it contains the list of additional arguments needed by
   the workshare construct.
*/

static void
expand_parallel_call (struct omp_region *region, basic_block bb,
		      gomp_parallel *entry_stmt,
		      vec<tree, va_gc> *ws_args)
{
  tree t, t1, t2, val, cond, c, clauses, flags;
  gimple_stmt_iterator gsi;
  gimple stmt;
  enum built_in_function start_ix;
  int start_ix2;
  location_t clause_loc;
  vec<tree, va_gc> *args;

  clauses = gimple_omp_parallel_clauses (entry_stmt);

  /* Determine what flavor of GOMP_parallel we will be
     emitting.  */
  start_ix = BUILT_IN_GOMP_PARALLEL;
  if (is_combined_parallel (region))
    {
      switch (region->inner->type)
	{
	case GIMPLE_OMP_FOR:
	  gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
	  /* Pick GOMP_parallel_loop_{static,dynamic,guided,runtime} by
	     offsetting from the static variant; RUNTIME maps to +3.  */
	  start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC
		       + (region->inner->sched_kind
			  == OMP_CLAUSE_SCHEDULE_RUNTIME
			  ? 3 : region->inner->sched_kind));
	  start_ix = (enum built_in_function)start_ix2;
	  break;
	case GIMPLE_OMP_SECTIONS:
	  start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* By default, the value of NUM_THREADS is zero (selected at run time)
     and there is no conditional.  */
  cond = NULL_TREE;
  val = build_int_cst (unsigned_type_node, 0);
  flags = build_int_cst (unsigned_type_node, 0);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = OMP_CLAUSE_IF_EXPR (c);

  c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
  if (c)
    {
      val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
      clause_loc = OMP_CLAUSE_LOCATION (c);
    }
  else
    clause_loc = gimple_location (entry_stmt);

  c = find_omp_clause (clauses, OMP_CLAUSE_PROC_BIND);
  if (c)
    flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));

  /* Ensure 'val' is of the correct type.  */
  val = fold_convert_loc (clause_loc, unsigned_type_node, val);

  /* If we found the clause 'if (cond)', build either
     (cond != 0) or (cond ? val : 1u).  */
  if (cond)
    {
      cond = gimple_boolify (cond);

      if (integer_zerop (val))
	val = fold_build2_loc (clause_loc,
			       EQ_EXPR, unsigned_type_node, cond,
			       build_int_cst (TREE_TYPE (cond), 0));
      else
	{
	  /* Build a small diamond in the CFG:
	     cond_bb -> {then_bb: val, else_bb: 1u} -> bb.  */
	  basic_block cond_bb, then_bb, else_bb;
	  edge e, e_then, e_else;
	  tree tmp_then, tmp_else, tmp_join, tmp_var;

	  tmp_var = create_tmp_var (TREE_TYPE (val));
	  if (gimple_in_ssa_p (cfun))
	    {
	      tmp_then = make_ssa_name (tmp_var);
	      tmp_else = make_ssa_name (tmp_var);
	      tmp_join = make_ssa_name (tmp_var);
	    }
	  else
	    {
	      tmp_then = tmp_var;
	      tmp_else = tmp_var;
	      tmp_join = tmp_var;
	    }

	  e = split_block (bb, NULL);
	  cond_bb = e->src;
	  bb = e->dest;
	  remove_edge (e);

	  then_bb = create_empty_bb (cond_bb);
	  else_bb = create_empty_bb (then_bb);
	  set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
	  set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);

	  stmt = gimple_build_cond_empty (cond);
	  gsi = gsi_start_bb (cond_bb);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	  gsi = gsi_start_bb (then_bb);
	  stmt = gimple_build_assign (tmp_then, val);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	  gsi = gsi_start_bb (else_bb);
	  stmt = gimple_build_assign (tmp_else,
				      build_int_cst (unsigned_type_node, 1));
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	  make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
	  make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
	  add_bb_to_loop (then_bb, cond_bb->loop_father);
	  add_bb_to_loop (else_bb, cond_bb->loop_father);
	  e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
	  e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);

	  if (gimple_in_ssa_p (cfun))
	    {
	      gphi *phi = create_phi_node (tmp_join, bb);
	      add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
	      add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
	    }

	  val = tmp_join;
	}

      gsi = gsi_start_bb (bb);
      val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
				      false, GSI_CONTINUE_LINKING);
    }

  /* Assemble the argument list: child fn, data, num_threads,
     optional workshare args, flags.  */
  gsi = gsi_last_bb (bb);
  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t1 = null_pointer_node;
  else
    t1 = build_fold_addr_expr (t);
  t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));

  vec_alloc (args, 4 + vec_safe_length (ws_args));
  args->quick_push (t2);
  args->quick_push (t1);
  args->quick_push (val);
  if (ws_args)
    args->splice (*ws_args);
  args->quick_push (flags);

  t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
			       builtin_decl_explicit (start_ix), args);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);
}

/* Insert a function call whose name is FUNC_NAME with the information from
   ENTRY_STMT into the basic_block BB.  */

static void
expand_cilk_for_call (basic_block bb, gomp_parallel *entry_stmt,
		      vec <tree, va_gc> *ws_args)
{
  tree t, t1, t2;
  gimple_stmt_iterator gsi;
  vec <tree, va_gc> *args;

  /* WS_ARGS carries exactly the Cilk runtime entry point name and the
     grain size, as set up by the _Cilk_for lowering.  */
  gcc_assert (vec_safe_length (ws_args) == 2);
  tree func_name = (*ws_args)[0];
  tree grain = (*ws_args)[1];

  tree clauses = gimple_omp_parallel_clauses (entry_stmt);
  tree count = find_omp_clause (clauses, OMP_CLAUSE__CILK_FOR_COUNT_);
  gcc_assert (count != NULL_TREE);
  count = OMP_CLAUSE_OPERAND (count, 0);

  gsi = gsi_last_bb (bb);
  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t1 = null_pointer_node;
  else
    t1 = build_fold_addr_expr (t);
  t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));

  vec_alloc (args, 4);
  args->quick_push (t2);
  args->quick_push (t1);
  args->quick_push (count);
  args->quick_push (grain);
  t = build_call_expr_loc_vec (UNKNOWN_LOCATION, func_name, args);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false,
			    GSI_CONTINUE_LINKING);
}

/* Build the function call to GOMP_task to actually
   generate the task operation.  BB is the block where to insert the code.
*/

static void
expand_task_call (basic_block bb, gomp_task *entry_stmt)
{
  tree t, t1, t2, t3, flags, cond, c, c2, clauses, depend;
  gimple_stmt_iterator gsi;
  location_t loc = gimple_location (entry_stmt);

  clauses = gimple_omp_task_clauses (entry_stmt);

  /* The IF clause becomes the COND argument; absent means "true".  */
  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
  else
    cond = boolean_true_node;

  /* Encode clause presence into the GOMP_task flags bitmask:
     bit 0 = untied, bit 2 = mergeable, bit 3 = depend.  */
  c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
  c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
  depend = find_omp_clause (clauses, OMP_CLAUSE_DEPEND);
  flags = build_int_cst (unsigned_type_node,
			 (c ? 1 : 0) + (c2 ? 4 : 0) + (depend ? 8 : 0));

  /* FINAL is a runtime condition: fold (final ? 2 : 0) into flags.  */
  c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
  if (c)
    {
      c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
      c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
			   build_int_cst (unsigned_type_node, 2),
			   build_int_cst (unsigned_type_node, 0));
      flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
    }
  if (depend)
    depend = OMP_CLAUSE_DECL (depend);
  else
    depend = build_int_cst (ptr_type_node, 0);

  gsi = gsi_last_bb (bb);
  t = gimple_omp_task_data_arg (entry_stmt);
  if (t == NULL)
    t2 = null_pointer_node;
  else
    t2 = build_fold_addr_expr_loc (loc, t);
  t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
  t = gimple_omp_task_copy_fn (entry_stmt);
  if (t == NULL)
    t3 = null_pointer_node;
  else
    t3 = build_fold_addr_expr_loc (loc, t);

  /* GOMP_task (fn, data, copyfn, arg_size, arg_align, if_cond, flags,
     depend).  */
  t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK), 8,
		       t1, t2, t3,
		       gimple_omp_task_arg_size (entry_stmt),
		       gimple_omp_task_arg_align (entry_stmt), cond, flags,
		       depend);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);
}

/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
   catch handler and return it.  This prevents programs from violating the
   structured block semantics with throws.
*/

static gimple_seq
maybe_catch_exception (gimple_seq body)
{
  gimple g;
  tree decl;

  if (!flag_exceptions)
    return body;

  /* Use the language's cleanup action if it has one (e.g. C++
     std::terminate); otherwise trap.  */
  if (lang_hooks.eh_protect_cleanup_actions != NULL)
    decl = lang_hooks.eh_protect_cleanup_actions ();
  else
    decl = builtin_decl_explicit (BUILT_IN_TRAP);

  g = gimple_build_eh_must_not_throw (decl);
  g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
			GIMPLE_TRY_CATCH);

  return gimple_seq_alloc_with_stmt (g);
}

/* Chain all the DECLs in LIST by their TREE_CHAIN fields.  */

static tree
vec2chain (vec<tree, va_gc> *v)
{
  tree chain = NULL_TREE, t;
  unsigned ix;

  /* Iterate in reverse so the resulting chain preserves the vector
     order front-to-back.  */
  FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
    {
      DECL_CHAIN (t) = chain;
      chain = t;
    }

  return chain;
}

/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
   left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
   removed.  */

static void
remove_exit_barrier (struct omp_region *region)
{
  gimple_stmt_iterator gsi;
  basic_block exit_bb;
  edge_iterator ei;
  edge e;
  gimple stmt;
  int any_addressable_vars = -1;

  exit_bb = region->exit;

  /* If the parallel region doesn't return, we don't have REGION->EXIT
     block at all.  */
  if (! exit_bb)
    return;

  /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN.  The
     workshare's GIMPLE_OMP_RETURN will be in a preceding block.  The kinds of
     statements that can appear in between are extremely limited -- no
     memory operations at all.  Here, we allow nothing at all, so the
     only thing we allow to precede this GIMPLE_OMP_RETURN is a label.  */
  gsi = gsi_last_bb (exit_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
  gsi_prev (&gsi);
  if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
    return;

  FOR_EACH_EDGE (e, ei, exit_bb->preds)
    {
      gsi = gsi_last_bb (e->src);
      if (gsi_end_p (gsi))
	continue;
      stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) == GIMPLE_OMP_RETURN
	  && !gimple_omp_return_nowait_p (stmt))
	{
	  /* OpenMP 3.0 tasks unfortunately prevent this optimization
	     in many cases.  If there could be tasks queued, the barrier
	     might be needed to let the tasks run before some local
	     variable of the parallel that the task uses as shared
	     runs out of scope.  The task can be spawned either
	     from within current function (this would be easy to check)
	     or from some function it calls and gets passed an address
	     of such a variable.  */
	  if (any_addressable_vars < 0)
	    {
	      /* Lazily scan the child function's locals and the lexical
	         blocks up to the parallel for any addressable variable;
	         cache the answer for the remaining edges.  */
	      gomp_parallel *parallel_stmt
		= as_a <gomp_parallel *> (last_stmt (region->entry));
	      tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
	      tree local_decls, block, decl;
	      unsigned ix;

	      any_addressable_vars = 0;
	      FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
		if (TREE_ADDRESSABLE (decl))
		  {
		    any_addressable_vars = 1;
		    break;
		  }
	      for (block = gimple_block (stmt);
		   !any_addressable_vars
		   && block
		   && TREE_CODE (block) == BLOCK;
		   block = BLOCK_SUPERCONTEXT (block))
		{
		  for (local_decls = BLOCK_VARS (block);
		       local_decls;
		       local_decls = DECL_CHAIN (local_decls))
		    if (TREE_ADDRESSABLE (local_decls))
		      {
			any_addressable_vars = 1;
			break;
		      }
		  if (block == gimple_block (parallel_stmt))
		    break;
		}
	    }
	  if (!any_addressable_vars)
	    gimple_omp_return_set_nowait (stmt);
	}
    }
}

/* Recursively walk REGION, its inner regions and their siblings, and
   apply remove_exit_barrier to each GIMPLE_OMP_PARALLEL region.  */

static void
remove_exit_barriers (struct omp_region *region)
{
  if (region->type == GIMPLE_OMP_PARALLEL)
    remove_exit_barrier (region);

  if (region->inner)
    {
      region = region->inner;
      remove_exit_barriers (region);
      while (region->next)
	{
	  region = region->next;
	  remove_exit_barriers (region);
	}
    }
}

/* Optimize omp_get_thread_num () and omp_get_num_threads ()
   calls.
   These can't be declared as const functions, but
   within one parallel body they are constant, so they can be
   transformed there into __builtin_omp_get_{thread_num,num_threads} ()
   which are declared const.  Similarly for task body, except
   that in untied task omp_get_thread_num () can change at any task
   scheduling point.  */

static void
optimize_omp_library_calls (gimple entry_stmt)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
  tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
  tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
  tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
  bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
		      && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
					  OMP_CLAUSE_UNTIED) != NULL);

  FOR_EACH_BB_FN (bb, cfun)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple call = gsi_stmt (gsi);
	tree decl;

	/* Only consider calls to external public functions with no
	   body in this TU — i.e. the real libgomp entry points.  */
	if (is_gimple_call (call)
	    && (decl = gimple_call_fndecl (call))
	    && DECL_EXTERNAL (decl)
	    && TREE_PUBLIC (decl)
	    && DECL_INITIAL (decl) == NULL)
	  {
	    tree built_in;

	    if (DECL_NAME (decl) == thr_num_id)
	      {
		/* In #pragma omp task untied omp_get_thread_num ()
		   can change during the execution of the task region.  */
		if (untied_task)
		  continue;
		built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
	      }
	    else if (DECL_NAME (decl) == num_thr_id)
	      built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
	    else
	      continue;

	    /* The declaration must match the builtin exactly (same
	       assembler name, no arguments, compatible return type,
	       and nothrow if exceptions are on) before substituting.  */
	    if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
		|| gimple_call_num_args (call) != 0)
	      continue;

	    if (flag_exceptions && !TREE_NOTHROW (decl))
	      continue;

	    if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
		|| !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
					TREE_TYPE (TREE_TYPE (built_in))))
	      continue;

	    gimple_call_set_fndecl (call, built_in);
	  }
      }
}

/* Callback for expand_omp_build_assign.  Return non-NULL if *tp needs to be
   regimplified.
*/

static tree
expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  /* Do not walk into types or other declarations.  */
  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}

/* Prepend TO = FROM assignment before *GSI_P, gimplifying FROM first and
   regimplifying the resulting statement if either operand requires it.  */

static void
expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from)
{
  /* An addressable LHS needs a simple (register) RHS.  */
  bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
  from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
				   true, GSI_SAME_STMT);
  gimple stmt = gimple_build_assign (to, from);
  gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
  if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
      || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      gimple_regimplify_operands (stmt, &gsi);
    }
}

/* Expand the OpenMP parallel or task directive starting at REGION.  */

static void
expand_omp_taskreg (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t;
  gimple_stmt_iterator gsi;
  gimple entry_stmt, stmt;
  edge e;
  vec<tree, va_gc> *ws_args;

  entry_stmt = last_stmt (region->entry);
  child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  entry_bb = region->entry;
  exit_bb = region->exit;

  bool is_cilk_for
    = (flag_cilkplus
       && gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL
       && find_omp_clause (gimple_omp_parallel_clauses (entry_stmt),
			   OMP_CLAUSE__CILK_FOR_COUNT_) != NULL_TREE);

  if (is_cilk_for)
    /* If it is a _Cilk_for statement, it is modelled *like* a parallel for,
       and the inner statement contains the name of the built-in function
       and grain.
*/
    ws_args = region->inner->ws_args;
  else if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL;

  if (child_cfun->cfg)
    {
      /* Due to inlining, it may happen that we have already outlined
	 the region, in which case all we need to do is make the
	 sub-graph unreachable and emit the parallel call.  */
      edge entry_succ_e, exit_succ_e;

      entry_succ_e = single_succ_edge (entry_bb);

      gsi = gsi_last_bb (entry_bb);
      gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
		  || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
      gsi_remove (&gsi, true);

      new_bb = entry_bb;
      if (exit_bb)
	{
	  exit_succ_e = single_succ_edge (exit_bb);
	  make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
	}
      remove_edge_and_dominated_blocks (entry_succ_e);
    }
  else
    {
      unsigned srcidx, dstidx, num;

      /* If the parallel region needs data sent from the parent
	 function, then the very first statement (except possible
	 tree profile counter updates) of the parallel body
	 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
	 &.OMP_DATA_O is passed as an argument to the child function,
	 we need to replace it with the argument as seen by the child
	 function.

	 In most cases, this will end up being the identity assignment
	 .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
	 a function call that has been inlined, the original PARM_DECL
	 .OMP_DATA_I may have been converted into a different local
	 variable.  In which case, we need to keep the assignment.  */
      if (gimple_omp_taskreg_data_arg (entry_stmt))
	{
	  basic_block entry_succ_bb = single_succ (entry_bb);
	  tree arg, narg;
	  gimple parcopy_stmt = NULL;

	  /* Find the .OMP_DATA_I = &.OMP_DATA_O copy assignment; it must
	     exist, hence the assert inside the loop.  */
	  for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
	    {
	      gimple stmt;

	      gcc_assert (!gsi_end_p (gsi));
	      stmt = gsi_stmt (gsi);
	      if (gimple_code (stmt) != GIMPLE_ASSIGN)
		continue;

	      if (gimple_num_ops (stmt) == 2)
		{
		  tree arg = gimple_assign_rhs1 (stmt);

		  /* We're ignoring the subcode because we're
		     effectively doing a STRIP_NOPS.  */

		  if (TREE_CODE (arg) == ADDR_EXPR
		      && TREE_OPERAND (arg, 0)
			 == gimple_omp_taskreg_data_arg (entry_stmt))
		    {
		      parcopy_stmt = stmt;
		      break;
		    }
		}
	    }

	  gcc_assert (parcopy_stmt != NULL);
	  arg = DECL_ARGUMENTS (child_fn);

	  if (!gimple_in_ssa_p (cfun))
	    {
	      if (gimple_assign_lhs (parcopy_stmt) == arg)
		gsi_remove (&gsi, true);
	      else
		{
		  /* ?? Is setting the subcode really necessary ??  */
		  gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
		  gimple_assign_set_rhs1 (parcopy_stmt, arg);
		}
	    }
	  else
	    {
	      /* If we are in ssa form, we must load the value from the default
		 definition of the argument.  That should not be defined now,
		 since the argument is not used uninitialized.  */
	      gcc_assert (ssa_default_def (cfun, arg) == NULL);
	      narg = make_ssa_name (arg, gimple_build_nop ());
	      set_ssa_default_def (cfun, arg, narg);
	      /* ?? Is setting the subcode really necessary ??  */
	      gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
	      gimple_assign_set_rhs1 (parcopy_stmt, narg);
	      update_stmt (parcopy_stmt);
	    }
	}

      /* Declare local variables needed in CHILD_CFUN.  */
      block = DECL_INITIAL (child_fn);
      BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
      /* The gimplifier could record temporaries in parallel/task block
	 rather than in containing function's local_decls chain,
	 which would mean cgraph missed finalizing them.  Do it now.  */
      for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
	if (TREE_CODE (t) == VAR_DECL
	    && TREE_STATIC (t)
	    && !DECL_EXTERNAL (t))
	  varpool_node::finalize_decl (t);
      DECL_SAVED_TREE (child_fn) = NULL;
      /* We'll create a CFG for child_fn, so no gimple body is needed.  */
      gimple_set_body (child_fn, NULL);
      TREE_USED (block) = 1;

      /* Reset DECL_CONTEXT on function arguments.  */
      for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
	DECL_CONTEXT (t) = child_fn;

      /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
	 so that it can be moved to the child function.  */
      gsi = gsi_last_bb (entry_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
			   || gimple_code (stmt) == GIMPLE_OMP_TASK));
      e = split_block (entry_bb, stmt);
      gsi_remove (&gsi, true);
      entry_bb = e->dest;
      single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

      /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR.  */
      if (exit_bb)
	{
	  gsi = gsi_last_bb (exit_bb);
	  gcc_assert (!gsi_end_p (gsi)
		      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
	  stmt = gimple_build_return (NULL);
	  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
	  gsi_remove (&gsi, true);
	}

      /* Move the parallel region into CHILD_CFUN.  */

      if (gimple_in_ssa_p (cfun))
	{
	  init_tree_ssa (child_cfun);
	  init_ssa_operands (child_cfun);
	  child_cfun->gimple_df->in_ssa_p = true;
	  block = NULL_TREE;
	}
      else
	block = gimple_block (entry_stmt);

      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
      if (exit_bb)
	single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
      /* When the OMP expansion process cannot guarantee an up-to-date
	 loop tree arrange for the child function to fixup loops.  */
      if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
	child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;

      /* Remove non-local VAR_DECLs from child_cfun->local_decls list,
	 compacting the vector in place.  */
      num = vec_safe_length (child_cfun->local_decls);
      for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
	{
	  t = (*child_cfun->local_decls)[srcidx];
	  if (DECL_CONTEXT (t) == cfun->decl)
	    continue;
	  if (srcidx != dstidx)
	    (*child_cfun->local_decls)[dstidx] = t;
	  dstidx++;
	}
      if (dstidx != num)
	vec_safe_truncate (child_cfun->local_decls, dstidx);

      /* Inform the callgraph about the new function.  */
      DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
      cgraph_node::add_new_function (child_fn, true);
      cgraph_node::get (child_fn)->parallelized_function = 1;

      /* Fix the callgraph edges for child_cfun.  Those for cfun will be
	 fixed in a following pass.  */
      push_cfun (child_cfun);
      if (optimize)
	optimize_omp_library_calls (entry_stmt);
      cgraph_edge::rebuild_edges ();

      /* Some EH regions might become dead, see PR34608.  If
	 pass_cleanup_cfg isn't the first pass to happen with the
	 new child, these dead EH edges might cause problems.
	 Clean them up now.  */
      if (flag_exceptions)
	{
	  basic_block bb;
	  bool changed = false;

	  FOR_EACH_BB_FN (bb, cfun)
	    changed |= gimple_purge_dead_eh_edges (bb);
	  if (changed)
	    cleanup_tree_cfg ();
	}
      if (gimple_in_ssa_p (cfun))
	update_ssa (TODO_update_ssa);
      pop_cfun ();
    }

  /* Emit a library call to launch the children threads.  */
  if (is_cilk_for)
    expand_cilk_for_call (new_bb,
			  as_a <gomp_parallel *> (entry_stmt), ws_args);
  else if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
    expand_parallel_call (region, new_bb,
			  as_a <gomp_parallel *> (entry_stmt), ws_args);
  else
    expand_task_call (new_bb, as_a <gomp_task *> (entry_stmt));
  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_only_virtuals);
}

/* Helper function for expand_omp_{for_*,simd}.  If this is the outermost
   of the combined collapse > 1 loop constructs, generate code like:
	if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
	if (cond3 is <)
	  adj = STEP3 - 1;
	else
	  adj = STEP3 + 1;
	count3 = (adj + N32 - N31) / STEP3;
	if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
	if (cond2 is <)
	  adj = STEP2 - 1;
	else
	  adj = STEP2 + 1;
	count2 = (adj + N22 - N21) / STEP2;
	if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
	if (cond1 is <)
	  adj = STEP1 - 1;
	else
	  adj = STEP1 + 1;
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
   Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
	count = 0;
   and set ZERO_ITER_BB to that bb.  If this isn't the outermost
   of the combined loop constructs, just initialize COUNTS array
   from the _looptemp_ clauses.  */

/* NOTE: It *could* be better to moosh all of the BBs together,
   creating one larger BB with all the computation and the unexpected
   jump at the end.  I.e.
bool zero3, zero2, zero1, zero;

     zero3 = N32 c3 N31;
     count3 = (N32 - N31) /[cl] STEP3;
     zero2 = N22 c2 N21;
     count2 = (N22 - N21) /[cl] STEP2;
     zero1 = N12 c1 N11;
     count1 = (N12 - N11) /[cl] STEP1;
     zero = zero3 || zero2 || zero1;
     count = count1 * count2 * count3;
     if (__builtin_expect(zero, false)) goto zero_iter_bb;

   After all, we expect the zero=false, and thus we expect to have to
   evaluate all of the comparison expressions, so short-circuiting
   oughtn't be a win.  Since the condition isn't protecting a
   denominator, we're not concerned about divide-by-zero, so we can
   fully evaluate count even if a numerator turned out to be wrong.

   It seems like putting this all together would create much better
   scheduling opportunities, and less pressure on the chip's branch
   predictor.  */

static void
expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
			    basic_block &entry_bb, tree *counts,
			    basic_block &zero_iter_bb, int &first_zero_iter,
			    basic_block &l2_dom_bb)
{
  tree t, type = TREE_TYPE (fd->loop.v);
  edge e, ne;
  int i;

  /* Collapsed loops need work for expansion into SSA form.  */
  gcc_assert (!gimple_in_ssa_p (cfun));

  if (gimple_omp_for_combined_into_p (fd->for_stmt)
      && TREE_CODE (fd->loop.n2) != INTEGER_CST)
    {
      /* First two _looptemp_ clauses are for istart/iend, counts[0]
	 isn't supposed to be handled, as the inner loop doesn't
	 use it.  */
      tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
				     OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      for (i = 0; i < fd->collapse; i++)
	{
	  innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  if (i)
	    counts[i] = OMP_CLAUSE_DECL (innerc);
	  else
	    counts[0] = NULL_TREE;
	}
      return;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      tree itype = TREE_TYPE (fd->loops[i].v);

      /* Unless the loop is provably non-empty, emit the runtime
	 zero-iteration check that branches to ZERO_ITER_BB.  */
      if (SSA_VAR_P (fd->loop.n2)
	  && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
				fold_convert (itype, fd->loops[i].n1),
				fold_convert (itype, fd->loops[i].n2)))
	      == NULL_TREE || !integer_onep (t)))
	{
	  gcond *cond_stmt;
	  tree n1, n2;
	  n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
	  n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
					 true, GSI_SAME_STMT);
	  n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
	  n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
					 true, GSI_SAME_STMT);
	  cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
					 NULL_TREE, NULL_TREE);
	  gsi_insert_before (gsi, cond_stmt, GSI_SAME_STMT);
	  if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
			 expand_omp_regimplify_p, NULL, NULL)
	      || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
			    expand_omp_regimplify_p, NULL, NULL))
	    {
	      *gsi = gsi_for_stmt (cond_stmt);
	      gimple_regimplify_operands (cond_stmt, gsi);
	    }
	  e = split_block (entry_bb, cond_stmt);
	  if (zero_iter_bb == NULL)
	    {
	      /* Create the shared "count = 0" block on first use and
	         report which loop first branched to it.  */
	      gassign *assign_stmt;
	      first_zero_iter = i;
	      zero_iter_bb = create_empty_bb (entry_bb);
	      add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
	      *gsi = gsi_after_labels (zero_iter_bb);
	      assign_stmt = gimple_build_assign (fd->loop.n2,
						 build_zero_cst (type));
	      gsi_insert_before (gsi, assign_stmt, GSI_SAME_STMT);
	      set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
				       entry_bb);
	    }
	  ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
	  ne->probability = REG_BR_PROB_BASE / 2000 - 1;
	  e->flags = EDGE_TRUE_VALUE;
	  e->probability = REG_BR_PROB_BASE - ne->probability;
	  if (l2_dom_bb == NULL)
	    l2_dom_bb = entry_bb;
	  entry_bb = e->dest;
	  *gsi = gsi_last_bb (entry_bb);
	}

      if (POINTER_TYPE_P (itype))
	itype = signed_type_for (itype);
      /* count = (adj + n2 - n1) / step, with adj = step -/+ 1 so the
	 truncating division rounds in the right direction.  */
      t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
				 ? -1 : 1));
      t = fold_build2 (PLUS_EXPR, itype,
		       fold_convert (itype, fd->loops[i].step), t);
      t = fold_build2 (PLUS_EXPR, itype, t,
		       fold_convert (itype, fd->loops[i].n2));
      t = fold_build2 (MINUS_EXPR, itype, t,
		       fold_convert (itype, fd->loops[i].n1));
      /* ?? We could probably use CEIL_DIV_EXPR instead of
	 TRUNC_DIV_EXPR and adjusting by hand.  Unless we can't
	 generate the same code in the end because generically we
	 don't know that the values involved must be negative for
	 GT??  */
      if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
	t = fold_build2 (TRUNC_DIV_EXPR, itype,
			 fold_build1 (NEGATE_EXPR, itype, t),
			 fold_build1 (NEGATE_EXPR, itype,
				      fold_convert (itype,
						    fd->loops[i].step)));
      else
	t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
			 fold_convert (itype, fd->loops[i].step));
      t = fold_convert (type, t);
      if (TREE_CODE (t) == INTEGER_CST)
	counts[i] = t;
      else
	{
	  counts[i] = create_tmp_reg (type, ".count");
	  expand_omp_build_assign (gsi, counts[i], t);
	}
      /* Accumulate the total iteration count into fd->loop.n2.  */
      if (SSA_VAR_P (fd->loop.n2))
	{
	  if (i == 0)
	    t = counts[0];
	  else
	    t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
	  expand_omp_build_assign (gsi, fd->loop.n2, t);
	}
    }
}

/* Helper function for expand_omp_{for_*,simd}.  Generate code like:
	T = V;
	V3 = N31 + (T % count3) * STEP3;
	T = T / count3;
	V2 = N21 + (T % count2) * STEP2;
	T = T / count2;
	V1 = N11 + T * STEP1;
   if this loop doesn't have an inner loop construct combined with it.
   If it does have an inner loop construct combined with it and the
   iteration count isn't known constant, store values from counts array
   into its _looptemp_ temporaries instead.
*/

static void
expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
			  tree *counts, gimple inner_stmt, tree startvar)
{
  int i;
  if (gimple_omp_for_combined_p (fd->for_stmt))
    {
      /* If fd->loop.n2 is constant, then no propagation of the counts
	 is needed, they are constant.  */
      if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
	return;

      tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
		     ? gimple_omp_parallel_clauses (inner_stmt)
		     : gimple_omp_for_clauses (inner_stmt);
      /* First two _looptemp_ clauses are for istart/iend, counts[0]
	 isn't supposed to be handled, as the inner loop doesn't
	 use it.  */
      tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      for (i = 0; i < fd->collapse; i++)
	{
	  innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  if (i)
	    {
	      /* Copy counts[i] into the inner construct's _looptemp_.  */
	      tree tem = OMP_CLAUSE_DECL (innerc);
	      tree t = fold_convert (TREE_TYPE (tem), counts[i]);
	      t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
					    false, GSI_CONTINUE_LINKING);
	      gassign *stmt = gimple_build_assign (tem, t);
	      gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
	    }
	}
      return;
    }

  /* Decompose the linear iteration number in STARTVAR into the
     per-loop induction variables, innermost first.  */
  tree type = TREE_TYPE (fd->loop.v);
  tree tem = create_tmp_reg (type, ".tem");
  gassign *stmt = gimple_build_assign (tem, startvar);
  gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);

  for (i = fd->collapse - 1; i >= 0; i--)
    {
      tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
      itype = vtype;
      if (POINTER_TYPE_P (vtype))
	itype = signed_type_for (vtype);
      if (i != 0)
	t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
      else
	t = tem;
      t = fold_convert (itype, t);
      t = fold_build2 (MULT_EXPR, itype, t,
		       fold_convert (itype, fd->loops[i].step));
      if (POINTER_TYPE_P (vtype))
	t = fold_build_pointer_plus (fd->loops[i].n1, t);
      else
	t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
      t = force_gimple_operand_gsi (&*gsi, t,
				    DECL_P (fd->loops[i].v)
				    && TREE_ADDRESSABLE (fd->loops[i].v),
				    NULL_TREE, false,
				    GSI_CONTINUE_LINKING);
      stmt = gimple_build_assign (fd->loops[i].v, t);
      gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
      if (i != 0)
	{
	  t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
	  t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
					false, GSI_CONTINUE_LINKING);
	  stmt = gimple_build_assign (tem, t);
	  gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
	}
    }
}

/* Helper function for expand_omp_for_*.  Generate code like:
    L10:
	V3 += STEP3;
	if (V3 cond3 N32) goto BODY_BB; else goto L11;
    L11:
	V3 = N31;
	V2 += STEP2;
	if (V2 cond2 N22) goto BODY_BB; else goto L12;
    L12:
	V2 = N21;
	V1 += STEP1;
	goto BODY_BB;  */

static basic_block
extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
			     basic_block body_bb)
{
  basic_block last_bb, bb, collapse_bb = NULL;
  int i;
  gimple_stmt_iterator gsi;
  edge e;
  tree t;
  gimple stmt;

  /* Build the increment/wrap-around blocks innermost loop first.  */
  last_bb = cont_bb;
  for (i = fd->collapse - 1; i >= 0; i--)
    {
      tree vtype = TREE_TYPE (fd->loops[i].v);

      bb = create_empty_bb (last_bb);
      add_bb_to_loop (bb, last_bb->loop_father);
      gsi = gsi_start_bb (bb);

      if (i < fd->collapse - 1)
	{
	  /* Falling out of an inner loop: reset its IV to N1.  */
	  e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
	  e->probability = REG_BR_PROB_BASE / 8;

	  t = fd->loops[i + 1].n1;
	  t = force_gimple_operand_gsi (&gsi, t,
					DECL_P (fd->loops[i + 1].v)
					&& TREE_ADDRESSABLE (fd->loops[i
									+ 1].v),
					NULL_TREE, false,
					GSI_CONTINUE_LINKING);
	  stmt = gimple_build_assign (fd->loops[i + 1].v, t);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	}
      else
	collapse_bb = bb;

      set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);

      if (POINTER_TYPE_P (vtype))
	t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
      else
	t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
      t = force_gimple_operand_gsi (&gsi, t,
				    DECL_P (fd->loops[i].v)
				    && TREE_ADDRESSABLE (fd->loops[i].v),
				    NULL_TREE, false, GSI_CONTINUE_LINKING);
      stmt = gimple_build_assign (fd->loops[i].v, t);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      if (i > 0)
	{
	  t = fd->loops[i].n2;
	  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
					false,
GSI_CONTINUE_LINKING);
	  tree v = fd->loops[i].v;
	  if (DECL_P (v) && TREE_ADDRESSABLE (v))
	    v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
					  false, GSI_CONTINUE_LINKING);
	  /* Loop back to BODY_BB while this IV still satisfies its
	     condition, otherwise fall through to the next outer IV.  */
	  t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
	  stmt = gimple_build_cond_empty (t);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	  e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
	  e->probability = REG_BR_PROB_BASE * 7 / 8;
	}
      else
	make_edge (bb, body_bb, EDGE_FALLTHRU);
      last_bb = bb;
    }

  return collapse_bb;
}

/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with any schedule.  Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	iend = iend0;
    L1:
	BODY;
	V += STEP;
	if (V cond iend) goto L1; else goto L2;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

    If this is a combined omp parallel loop, instead of the call to
    GOMP_loop_foo_start, we call GOMP_loop_foo_next.
    If this is gimple_omp_for_combined_p loop, then instead of assigning
    V and iend in L0 we assign the first two _looptemp_ clause decls of the
    inner GIMPLE_OMP_FOR and V += STEP; and
    if (V cond iend) goto L1; else goto L2; are removed.
For collapsed loops, given parameters:
      collapse(3)
      for (V1 = N11; V1 cond1 N12; V1 += STEP1)
	for (V2 = N21; V2 cond2 N22; V2 += STEP2)
	  for (V3 = N31; V3 cond3 N32; V3 += STEP3)
	    BODY;

   we generate pseudocode

	if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
	if (cond3 is <)
	  adj = STEP3 - 1;
	else
	  adj = STEP3 + 1;
	count3 = (adj + N32 - N31) / STEP3;
	if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
	if (cond2 is <)
	  adj = STEP2 - 1;
	else
	  adj = STEP2 + 1;
	count2 = (adj + N22 - N21) / STEP2;
	if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
	if (cond1 is <)
	  adj = STEP1 - 1;
	else
	  adj = STEP1 + 1;
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
	goto Z1;
    Z0:
	count = 0;
    Z1:
	more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	T = V;
	V3 = N31 + (T % count3) * STEP3;
	T = T / count3;
	V2 = N21 + (T % count2) * STEP2;
	T = T / count2;
	V1 = N11 + T * STEP1;
	iend = iend0;
    L1:
	BODY;
	V += 1;
	if (V < iend) goto L10; else goto L2;
    L10:
	V3 += STEP3;
	if (V3 cond3 N32) goto L1; else goto L11;
    L11:
	V3 = N31;
	V2 += STEP2;
	if (V2 cond2 N22) goto L1; else goto L12;
    L12:
	V2 = N21;
	V1 += STEP1;
	goto L1;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

    */

static void
expand_omp_for_generic (struct omp_region *region,
			struct omp_for_data *fd,
			enum built_in_function start_fn,
			enum built_in_function next_fn,
			gimple inner_stmt)
{
  tree type, istart0, iend0, iend;
  tree t, vmain, vback, bias = NULL_TREE;
  basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
  basic_block l2_bb = NULL, l3_bb = NULL;
  gimple_stmt_iterator gsi;
  gassign *assign_stmt;
  bool in_combined_parallel = is_combined_parallel (region);
  /* A "broken" loop has no continue block (e.g. body never loops back).  */
  bool broken_loop = region->cont == NULL;
  edge e, ne;
  tree *counts = NULL;
  int i;

  gcc_assert (!broken_loop || !in_combined_parallel);
  gcc_assert (fd->iter_type == long_integer_type_node
	      || !in_combined_parallel);

  type = TREE_TYPE (fd->loop.v);
  istart0 = create_tmp_var (fd->iter_type, ".istart0");
  iend0 = create_tmp_var (fd->iter_type, ".iend0");
  TREE_ADDRESSABLE (istart0) = 1;
  TREE_ADDRESSABLE (iend0) = 1;

  /* See if we need to bias by LLONG_MIN.  */
  if (fd->iter_type == long_long_unsigned_type_node
      && TREE_CODE (type) == INTEGER_TYPE
      && !TYPE_UNSIGNED (type))
    {
      tree n1, n2;

      if (fd->loop.cond_code == LT_EXPR)
	{
	  n1 = fd->loop.n1;
	  n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
	}
      else
	{
	  n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
	  n2 = fd->loop.n1;
	}
      if (TREE_CODE (n1) != INTEGER_CST
	  || TREE_CODE (n2) != INTEGER_CST
	  || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
	bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
    }

  entry_bb = region->entry;
  cont_bb = region->cont;
  collapse_bb = NULL;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (broken_loop
	      || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  l1_bb = single_succ (l0_bb);
  if (!broken_loop)
    {
      l2_bb = create_empty_bb (cont_bb);
      gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
      gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
    }
  else
    l2_bb = NULL;
  l3_bb = BRANCH_EDGE (entry_bb)->dest;
  exit_bb = region->exit;

  gsi = gsi_last_bb (entry_bb);

  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
  if (fd->collapse > 1)
    {
      int first_zero_iter = -1;
      basic_block zero_iter_bb = NULL, l2_dom_bb = NULL;

      counts = XALLOCAVEC (tree, fd->collapse);
      expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
				  zero_iter_bb, first_zero_iter,
				  l2_dom_bb);

      if (zero_iter_bb)
	{
	  /* Some counts[i] vars might be uninitialized if
	     some loop has zero iterations.  But the body shouldn't
	     be executed in that case, so just avoid uninit warnings.  */
	  for (i = first_zero_iter; i < fd->collapse; i++)
	    if (SSA_VAR_P (counts[i]))
	      TREE_NO_WARNING (counts[i]) = 1;
	  gsi_prev (&gsi);
	  e = split_block (entry_bb, gsi_stmt (gsi));
	  entry_bb = e->dest;
	  make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
	  gsi = gsi_last_bb (entry_bb);
	  set_immediate_dominator (CDI_DOMINATORS, entry_bb,
				   get_immediate_dominator (CDI_DOMINATORS,
							    zero_iter_bb));
	}
    }
  if (in_combined_parallel)
    {
      /* In a combined parallel loop, emit a call to
	 GOMP_loop_foo_next.  */
      t = build_call_expr (builtin_decl_explicit (next_fn), 2,
			   build_fold_addr_expr (istart0),
			   build_fold_addr_expr (iend0));
    }
  else
    {
      tree t0, t1, t2, t3, t4;
      /* If this is not a combined parallel loop, emit a call to
	 GOMP_loop_foo_start in ENTRY_BB.  */
      t4 = build_fold_addr_expr (iend0);
      t3 = build_fold_addr_expr (istart0);
      t2 = fold_convert (fd->iter_type, fd->loop.step);
      t1 = fd->loop.n2;
      t0 = fd->loop.n1;
      if (gimple_omp_for_combined_into_p (fd->for_stmt))
	{
	  tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
					 OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  t0 = OMP_CLAUSE_DECL (innerc);
	  innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  t1 = OMP_CLAUSE_DECL (innerc);
	}
      if (POINTER_TYPE_P (TREE_TYPE (t0))
	  && TYPE_PRECISION (TREE_TYPE (t0))
	     != TYPE_PRECISION (fd->iter_type))
	{
	  /* Avoid casting pointers to integer of a different size.  */
	  tree itype = signed_type_for (type);
	  t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
	  t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
	}
      else
	{
	  t1 = fold_convert (fd->iter_type, t1);
	  t0 = fold_convert (fd->iter_type, t0);
	}
      if (bias)
	{
	  t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
	  t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
	}
      if (fd->iter_type == long_integer_type_node)
	{
	  if (fd->chunk_size)
	    {
	      t = fold_convert (fd->iter_type, fd->chunk_size);
	      t = build_call_expr (builtin_decl_explicit (start_fn),
				   6, t0, t1, t2, t, t3, t4);
	    }
	  else
	    t = build_call_expr (builtin_decl_explicit (start_fn),
				 5, t0, t1, t2, t3, t4);
	}
      else
	{
	  tree t5;
	  tree c_bool_type;
	  tree bfn_decl;

	  /* The GOMP_loop_ull_*start functions have additional boolean
	     argument, true for < loops and false for > loops.
	     In Fortran, the C bool type can be different from
	     boolean_type_node.  */
	  bfn_decl = builtin_decl_explicit (start_fn);
	  c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
	  t5 = build_int_cst (c_bool_type,
			      fd->loop.cond_code == LT_EXPR ? 1 : 0);
	  if (fd->chunk_size)
	    {
	      tree bfn_decl = builtin_decl_explicit (start_fn);
	      t = fold_convert (fd->iter_type, fd->chunk_size);
	      t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
	    }
	  else
	    t = build_call_expr (builtin_decl_explicit (start_fn),
				 6, t5, t0, t1, t2, t3, t4);
	}
    }
  if (TREE_TYPE (t) != boolean_type_node)
    t = fold_build2 (NE_EXPR, boolean_type_node,
		     t, build_int_cst (TREE_TYPE (t), 0));
  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				true, GSI_SAME_STMT);
  gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  /* Iteration setup for sequential loop goes in L0_BB.  */
  tree startvar = fd->loop.v;
  tree endvar = NULL_TREE;

  if (gimple_omp_for_combined_p (fd->for_stmt))
    {
      gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
		  && gimple_omp_for_kind (inner_stmt)
		     == GF_OMP_FOR_KIND_SIMD);
      tree innerc = find_omp_clause (gimple_omp_for_clauses (inner_stmt),
				     OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      startvar = OMP_CLAUSE_DECL (innerc);
      innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
				OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      endvar = OMP_CLAUSE_DECL (innerc);
    }

  gsi = gsi_start_bb (l0_bb);
  t = istart0;
  if (bias)
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
  if (POINTER_TYPE_P (TREE_TYPE (startvar)))
    t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
  t = fold_convert (TREE_TYPE (startvar), t);
  t = force_gimple_operand_gsi (&gsi, t,
				DECL_P (startvar)
				&& TREE_ADDRESSABLE (startvar),
				NULL_TREE, false, GSI_CONTINUE_LINKING);
  assign_stmt = gimple_build_assign (startvar, t);
  gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);

  t = iend0;
  if (bias)
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
  if (POINTER_TYPE_P (TREE_TYPE (startvar)))
    t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
  t = fold_convert (TREE_TYPE (startvar), t);
  iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				   false, GSI_CONTINUE_LINKING);
  if (endvar)
    {
      assign_stmt = gimple_build_assign (endvar, iend);
      gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
      if (useless_type_conversion_p (TREE_TYPE (fd->loop.v),
				     TREE_TYPE (iend)))
	assign_stmt = gimple_build_assign (fd->loop.v, iend);
      else
	assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, iend);
      gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
    }
  if (fd->collapse > 1)
    expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);

  if (!broken_loop)
    {
      /* Code to control the increment and predicate for the sequential
	 loop goes in the CONT_BB.  */
      gsi = gsi_last_bb (cont_bb);
      gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
      gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (cont_stmt);
      vback = gimple_omp_continue_control_def (cont_stmt);

      if (!gimple_omp_for_combined_p (fd->for_stmt))
	{
	  if (POINTER_TYPE_P (type))
	    t = fold_build_pointer_plus (vmain, fd->loop.step);
	  else
	    t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
	  t = force_gimple_operand_gsi (&gsi, t,
					DECL_P (vback)
					&& TREE_ADDRESSABLE (vback),
					NULL_TREE, true, GSI_SAME_STMT);
	  assign_stmt = gimple_build_assign (vback, t);
	  gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);

	  t = build2 (fd->loop.cond_code, boolean_type_node,
		      DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
		      iend);
	  gcond *cond_stmt = gimple_build_cond_empty (t);
	  gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
	}

      /* Remove GIMPLE_OMP_CONTINUE.  */
      gsi_remove (&gsi, true);

      if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
	collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);

      /* Emit code to get the next parallel iteration in L2_BB.  */
      gsi = gsi_start_bb (l2_bb);

      t = build_call_expr (builtin_decl_explicit (next_fn), 2,
			   build_fold_addr_expr (istart0),
			   build_fold_addr_expr (iend0));
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    false, GSI_CONTINUE_LINKING);
      if (TREE_TYPE (t) != boolean_type_node)
	t = fold_build2 (NE_EXPR, boolean_type_node,
			 t, build_int_cst (TREE_TYPE (t), 0));
      gcond *cond_stmt = gimple_build_cond_empty (t);
      gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
    }

  /* Add the loop cleanup function.  */
  gsi = gsi_last_bb (exit_bb);
  if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
    t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
  else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
    t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
  else
    t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
  gcall *call_stmt = gimple_build_call (t, 0);
  if (gimple_omp_return_lhs (gsi_stmt (gsi)))
    gimple_call_set_lhs (call_stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
  gsi_insert_after (&gsi, call_stmt, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Connect the new blocks.  */
  find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;

  if (!broken_loop)
    {
      gimple_seq phis;

      e = find_edge (cont_bb, l3_bb);
      ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);

      /* Retarget L3's PHI args from the CONT_BB edge to the new
	 L2_BB edge before removing the old edge.  */
      phis = phi_nodes (l3_bb);
      for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple phi = gsi_stmt (gsi);
	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
		   PHI_ARG_DEF_FROM_EDGE (phi, e));
	}
      remove_edge (e);

      make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
      add_bb_to_loop (l2_bb, cont_bb->loop_father);
      e = find_edge (cont_bb, l1_bb);
      if (gimple_omp_for_combined_p (fd->for_stmt))
	{
	  remove_edge (e);
	  e = NULL;
	}
      else if (fd->collapse > 1)
	{
	  remove_edge (e);
	  e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
	}
      else
	e->flags = EDGE_TRUE_VALUE;
      if (e)
	{
	  e->probability = REG_BR_PROB_BASE * 7 / 8;
	  find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
	}
      else
	{
	  e = find_edge (cont_bb, l2_bb);
	  e->flags = EDGE_FALLTHRU;
	}
      make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);

      set_immediate_dominator (CDI_DOMINATORS, l2_bb,
			       recompute_dominator (CDI_DOMINATORS, l2_bb));
      set_immediate_dominator (CDI_DOMINATORS, l3_bb,
			       recompute_dominator (CDI_DOMINATORS, l3_bb));
      set_immediate_dominator (CDI_DOMINATORS, l0_bb,
			       recompute_dominator (CDI_DOMINATORS, l0_bb));
      set_immediate_dominator (CDI_DOMINATORS, l1_bb,
			       recompute_dominator (CDI_DOMINATORS, l1_bb));

      /* Register the dispatch loop (L0..L2) and, unless combined, the
	 sequential body loop (header L1) in the loop tree.  */
      struct loop *outer_loop = alloc_loop ();
      outer_loop->header = l0_bb;
      outer_loop->latch = l2_bb;
      add_loop (outer_loop, l0_bb->loop_father);

      if (!gimple_omp_for_combined_p (fd->for_stmt))
	{
	  struct loop *loop = alloc_loop ();
	  loop->header = l1_bb;
	  /* The loop may have multiple latches.  */
	  add_loop (loop, outer_loop);
	}
    }
}

/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and no specified chunk size.  Given
   parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	if ((__typeof (V)) -1 > 0 && cond is >)
	  n = -(adj + N2 - N1) / -STEP;
	else
	  n = (adj + N2 - N1) / STEP;
	q = n / nthreads;
	tt = n % nthreads;
	if (threadid < tt) goto L3; else goto L4;
    L3:
	tt = 0;
	q = q + 1;
    L4:
	s0 = q * threadid + tt;
	e0 = s0 + q;
	V = s0 * STEP + N1;
	if (s0 >= e0) goto L2; else goto L0;
    L0:
	e = e0 * STEP + N1;
    L1:
	BODY;
	V += STEP;
	if (V cond e) goto L1;
    L2:
*/

static void
expand_omp_for_static_nochunk (struct omp_region *region,
			       struct omp_for_data *fd,
			       gimple inner_stmt)
{
  tree n, q, s0, e0, e, t, tt, nthreads, threadid;
  tree type, itype, vmain, vback;
  basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
  basic_block body_bb, cont_bb, collapse_bb = NULL;
  basic_block fin_bb;
  gimple_stmt_iterator gsi;
  edge ep;
  bool broken_loop = region->cont == NULL;
  tree *counts = NULL;
  tree n1, n2, step;

  gcc_checking_assert ((gimple_omp_for_kind (fd->for_stmt)
			!= GF_OMP_FOR_KIND_OACC_LOOP)
		       || !inner_stmt);

  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = signed_type_for (type);

  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  fin_bb = BRANCH_EDGE (entry_bb)->dest;
  gcc_assert (broken_loop
	      || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
  seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  body_bb = single_succ (seq_start_bb);
  if (!broken_loop)
    {
      gcc_assert (BRANCH_EDGE
(cont_bb)->dest == body_bb); gcc_assert (EDGE_COUNT (cont_bb->succs) == 2); } exit_bb = region->exit; /* Iteration space partitioning goes in ENTRY_BB. */ gsi = gsi_last_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); if (fd->collapse > 1) { int first_zero_iter = -1; basic_block l2_dom_bb = NULL; counts = XALLOCAVEC (tree, fd->collapse); expand_omp_for_init_counts (fd, &gsi, entry_bb, counts, fin_bb, first_zero_iter, l2_dom_bb); t = NULL_TREE; } else if (gimple_omp_for_combined_into_p (fd->for_stmt)) t = integer_one_node; else t = fold_binary (fd->loop.cond_code, boolean_type_node, fold_convert (type, fd->loop.n1), fold_convert (type, fd->loop.n2)); if (fd->collapse == 1 && TYPE_UNSIGNED (type) && (t == NULL_TREE || !integer_onep (t))) { n1 = fold_convert (type, unshare_expr (fd->loop.n1)); n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE, true, GSI_SAME_STMT); n2 = fold_convert (type, unshare_expr (fd->loop.n2)); n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE, true, GSI_SAME_STMT); gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2, NULL_TREE, NULL_TREE); gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT); if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL) || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL)) { gsi = gsi_for_stmt (cond_stmt); gimple_regimplify_operands (cond_stmt, &gsi); } ep = split_block (entry_bb, cond_stmt); ep->flags = EDGE_TRUE_VALUE; entry_bb = ep->dest; ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1); ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE); ep->probability = REG_BR_PROB_BASE / 2000 - 1; if (gimple_in_ssa_p (cfun)) { int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx; for (gphi_iterator gpi = gsi_start_phis (fin_bb); !gsi_end_p (gpi); gsi_next (&gpi)) { gphi *phi = gpi.phi (); add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx), ep, UNKNOWN_LOCATION); } } gsi = gsi_last_bb 
(entry_bb); } switch (gimple_omp_for_kind (fd->for_stmt)) { case GF_OMP_FOR_KIND_FOR: nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS); threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM); break; case GF_OMP_FOR_KIND_DISTRIBUTE: nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS); threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM); break; case GF_OMP_FOR_KIND_OACC_LOOP: nthreads = builtin_decl_explicit (BUILT_IN_GOACC_GET_NUM_THREADS); threadid = builtin_decl_explicit (BUILT_IN_GOACC_GET_THREAD_NUM); break; default: gcc_unreachable (); } nthreads = build_call_expr (nthreads, 0); nthreads = fold_convert (itype, nthreads); nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE, true, GSI_SAME_STMT); threadid = build_call_expr (threadid, 0); threadid = fold_convert (itype, threadid); threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE, true, GSI_SAME_STMT); n1 = fd->loop.n1; n2 = fd->loop.n2; step = fd->loop.step; if (gimple_omp_for_combined_into_p (fd->for_stmt)) { tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n1 = OMP_CLAUSE_DECL (innerc); innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n2 = OMP_CLAUSE_DECL (innerc); } n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1), true, NULL_TREE, true, GSI_SAME_STMT); n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2), true, NULL_TREE, true, GSI_SAME_STMT); step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step), true, NULL_TREE, true, GSI_SAME_STMT); t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? 
-1 : 1)); t = fold_build2 (PLUS_EXPR, itype, step, t); t = fold_build2 (PLUS_EXPR, itype, t, n2); t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1)); if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR) t = fold_build2 (TRUNC_DIV_EXPR, itype, fold_build1 (NEGATE_EXPR, itype, t), fold_build1 (NEGATE_EXPR, itype, step)); else t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step); t = fold_convert (itype, t); n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); q = create_tmp_reg (itype, "q"); t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads); t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT); gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT); tt = create_tmp_reg (itype, "tt"); t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads); t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT); gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT); t = build2 (LT_EXPR, boolean_type_node, threadid, tt); gcond *cond_stmt = gimple_build_cond_empty (t); gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT); second_bb = split_block (entry_bb, cond_stmt)->dest; gsi = gsi_last_bb (second_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)), GSI_SAME_STMT); gassign *assign_stmt = gimple_build_assign (q, PLUS_EXPR, q, build_int_cst (itype, 1)); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); third_bb = split_block (second_bb, assign_stmt)->dest; gsi = gsi_last_bb (third_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); t = build2 (MULT_EXPR, itype, q, threadid); t = build2 (PLUS_EXPR, itype, t, tt); s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); t = fold_build2 (PLUS_EXPR, itype, s0, q); e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); t = build2 (GE_EXPR, boolean_type_node, s0, e0); 
gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT); /* Remove the GIMPLE_OMP_FOR statement. */ gsi_remove (&gsi, true); /* Setup code for sequential iteration goes in SEQ_START_BB. */ gsi = gsi_start_bb (seq_start_bb); tree startvar = fd->loop.v; tree endvar = NULL_TREE; if (gimple_omp_for_combined_p (fd->for_stmt)) { tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL ? gimple_omp_parallel_clauses (inner_stmt) : gimple_omp_for_clauses (inner_stmt); tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); startvar = OMP_CLAUSE_DECL (innerc); innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); endvar = OMP_CLAUSE_DECL (innerc); } t = fold_convert (itype, s0); t = fold_build2 (MULT_EXPR, itype, t, step); if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (n1, t); else t = fold_build2 (PLUS_EXPR, type, t, n1); t = fold_convert (TREE_TYPE (startvar), t); t = force_gimple_operand_gsi (&gsi, t, DECL_P (startvar) && TREE_ADDRESSABLE (startvar), NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (startvar, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); t = fold_convert (itype, e0); t = fold_build2 (MULT_EXPR, itype, t, step); if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (n1, t); else t = fold_build2 (PLUS_EXPR, type, t, n1); t = fold_convert (TREE_TYPE (startvar), t); e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); if (endvar) { assign_stmt = gimple_build_assign (endvar, e); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e))) assign_stmt = gimple_build_assign (fd->loop.v, e); else assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } if (fd->collapse > 1) expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar); if 
(!broken_loop) { /* The code controlling the sequential loop replaces the GIMPLE_OMP_CONTINUE. */ gsi = gsi_last_bb (cont_bb); gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi)); gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE); vmain = gimple_omp_continue_control_use (cont_stmt); vback = gimple_omp_continue_control_def (cont_stmt); if (!gimple_omp_for_combined_p (fd->for_stmt)) { if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (vmain, step); else t = fold_build2 (PLUS_EXPR, type, vmain, step); t = force_gimple_operand_gsi (&gsi, t, DECL_P (vback) && TREE_ADDRESSABLE (vback), NULL_TREE, true, GSI_SAME_STMT); assign_stmt = gimple_build_assign (vback, t); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); t = build2 (fd->loop.cond_code, boolean_type_node, DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, e); gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT); } /* Remove the GIMPLE_OMP_CONTINUE statement. */ gsi_remove (&gsi, true); if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt)) collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb); } /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */ gsi = gsi_last_bb (exit_bb); if (!gimple_omp_return_nowait_p (gsi_stmt (gsi))) { t = gimple_omp_return_lhs (gsi_stmt (gsi)); if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP) gcc_checking_assert (t == NULL_TREE); else gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT); } gsi_remove (&gsi, true); /* Connect all the blocks. 
*/ ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE); ep->probability = REG_BR_PROB_BASE / 4 * 3; ep = find_edge (entry_bb, second_bb); ep->flags = EDGE_TRUE_VALUE; ep->probability = REG_BR_PROB_BASE / 4; find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE; find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE; if (!broken_loop) { ep = find_edge (cont_bb, body_bb); if (gimple_omp_for_combined_p (fd->for_stmt)) { remove_edge (ep); ep = NULL; } else if (fd->collapse > 1) { remove_edge (ep); ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE); } else ep->flags = EDGE_TRUE_VALUE; find_edge (cont_bb, fin_bb)->flags = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU; } set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb); set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb); set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb); set_immediate_dominator (CDI_DOMINATORS, body_bb, recompute_dominator (CDI_DOMINATORS, body_bb)); set_immediate_dominator (CDI_DOMINATORS, fin_bb, recompute_dominator (CDI_DOMINATORS, fin_bb)); if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt)) { struct loop *loop = alloc_loop (); loop->header = body_bb; if (collapse_bb == NULL) loop->latch = cont_bb; add_loop (loop, body_bb->loop_father); } } /* A subroutine of expand_omp_for. Generate code for a parallel loop with static schedule and a specified chunk size. 
Given parameters: for (V = N1; V cond N2; V += STEP) BODY; where COND is "<" or ">", we generate pseudocode if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2; if (cond is <) adj = STEP - 1; else adj = STEP + 1; if ((__typeof (V)) -1 > 0 && cond is >) n = -(adj + N2 - N1) / -STEP; else n = (adj + N2 - N1) / STEP; trip = 0; V = threadid * CHUNK * STEP + N1; -- this extra definition of V is here so that V is defined if the loop is not entered L0: s0 = (trip * nthreads + threadid) * CHUNK; e0 = min(s0 + CHUNK, n); if (s0 < n) goto L1; else goto L4; L1: V = s0 * STEP + N1; e = e0 * STEP + N1; L2: BODY; V += STEP; if (V cond e) goto L2; else goto L3; L3: trip += 1; goto L0; L4: */ static void expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd, gimple inner_stmt) { tree n, s0, e0, e, t; tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid; tree type, itype, vmain, vback, vextra; basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb; basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb; gimple_stmt_iterator gsi; edge se; bool broken_loop = region->cont == NULL; tree *counts = NULL; tree n1, n2, step; gcc_checking_assert ((gimple_omp_for_kind (fd->for_stmt) != GF_OMP_FOR_KIND_OACC_LOOP) || !inner_stmt); itype = type = TREE_TYPE (fd->loop.v); if (POINTER_TYPE_P (type)) itype = signed_type_for (type); entry_bb = region->entry; se = split_block (entry_bb, last_stmt (entry_bb)); entry_bb = se->src; iter_part_bb = se->dest; cont_bb = region->cont; gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2); fin_bb = BRANCH_EDGE (iter_part_bb)->dest; gcc_assert (broken_loop || fin_bb == FALLTHRU_EDGE (cont_bb)->dest); seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb)); body_bb = single_succ (seq_start_bb); if (!broken_loop) { gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb); gcc_assert (EDGE_COUNT (cont_bb->succs) == 2); trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb)); } exit_bb = region->exit; /* Trip 
and adjustment setup goes in ENTRY_BB. */ gsi = gsi_last_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); if (fd->collapse > 1) { int first_zero_iter = -1; basic_block l2_dom_bb = NULL; counts = XALLOCAVEC (tree, fd->collapse); expand_omp_for_init_counts (fd, &gsi, entry_bb, counts, fin_bb, first_zero_iter, l2_dom_bb); t = NULL_TREE; } else if (gimple_omp_for_combined_into_p (fd->for_stmt)) t = integer_one_node; else t = fold_binary (fd->loop.cond_code, boolean_type_node, fold_convert (type, fd->loop.n1), fold_convert (type, fd->loop.n2)); if (fd->collapse == 1 && TYPE_UNSIGNED (type) && (t == NULL_TREE || !integer_onep (t))) { n1 = fold_convert (type, unshare_expr (fd->loop.n1)); n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE, true, GSI_SAME_STMT); n2 = fold_convert (type, unshare_expr (fd->loop.n2)); n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE, true, GSI_SAME_STMT); gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2, NULL_TREE, NULL_TREE); gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT); if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL) || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL)) { gsi = gsi_for_stmt (cond_stmt); gimple_regimplify_operands (cond_stmt, &gsi); } se = split_block (entry_bb, cond_stmt); se->flags = EDGE_TRUE_VALUE; entry_bb = se->dest; se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1); se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE); se->probability = REG_BR_PROB_BASE / 2000 - 1; if (gimple_in_ssa_p (cfun)) { int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx; for (gphi_iterator gpi = gsi_start_phis (fin_bb); !gsi_end_p (gpi); gsi_next (&gpi)) { gphi *phi = gpi.phi (); add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx), se, UNKNOWN_LOCATION); } } gsi = gsi_last_bb (entry_bb); } switch (gimple_omp_for_kind (fd->for_stmt)) { case GF_OMP_FOR_KIND_FOR: nthreads = builtin_decl_explicit 
(BUILT_IN_OMP_GET_NUM_THREADS); threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM); break; case GF_OMP_FOR_KIND_DISTRIBUTE: nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS); threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM); break; case GF_OMP_FOR_KIND_OACC_LOOP: nthreads = builtin_decl_explicit (BUILT_IN_GOACC_GET_NUM_THREADS); threadid = builtin_decl_explicit (BUILT_IN_GOACC_GET_THREAD_NUM); break; default: gcc_unreachable (); } nthreads = build_call_expr (nthreads, 0); nthreads = fold_convert (itype, nthreads); nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE, true, GSI_SAME_STMT); threadid = build_call_expr (threadid, 0); threadid = fold_convert (itype, threadid); threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE, true, GSI_SAME_STMT); n1 = fd->loop.n1; n2 = fd->loop.n2; step = fd->loop.step; if (gimple_omp_for_combined_into_p (fd->for_stmt)) { tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n1 = OMP_CLAUSE_DECL (innerc); innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n2 = OMP_CLAUSE_DECL (innerc); } n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1), true, NULL_TREE, true, GSI_SAME_STMT); n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2), true, NULL_TREE, true, GSI_SAME_STMT); step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step), true, NULL_TREE, true, GSI_SAME_STMT); fd->chunk_size = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->chunk_size), true, NULL_TREE, true, GSI_SAME_STMT); t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? 
-1 : 1)); t = fold_build2 (PLUS_EXPR, itype, step, t); t = fold_build2 (PLUS_EXPR, itype, t, n2); t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1)); if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR) t = fold_build2 (TRUNC_DIV_EXPR, itype, fold_build1 (NEGATE_EXPR, itype, t), fold_build1 (NEGATE_EXPR, itype, step)); else t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step); t = fold_convert (itype, t); n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); trip_var = create_tmp_reg (itype, ".trip"); if (gimple_in_ssa_p (cfun)) { trip_init = make_ssa_name (trip_var); trip_main = make_ssa_name (trip_var); trip_back = make_ssa_name (trip_var); } else { trip_init = trip_var; trip_main = trip_var; trip_back = trip_var; } gassign *assign_stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0)); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size); t = fold_build2 (MULT_EXPR, itype, t, step); if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (n1, t); else t = fold_build2 (PLUS_EXPR, type, t, n1); vextra = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); /* Remove the GIMPLE_OMP_FOR. */ gsi_remove (&gsi, true); /* Iteration space partitioning goes in ITER_PART_BB. */ gsi = gsi_last_bb (iter_part_bb); t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads); t = fold_build2 (PLUS_EXPR, itype, t, threadid); t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size); s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size); t = fold_build2 (MIN_EXPR, itype, t, n); e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); t = build2 (LT_EXPR, boolean_type_node, s0, n); gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING); /* Setup code for sequential iteration goes in SEQ_START_BB. 
*/ gsi = gsi_start_bb (seq_start_bb); tree startvar = fd->loop.v; tree endvar = NULL_TREE; if (gimple_omp_for_combined_p (fd->for_stmt)) { tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL ? gimple_omp_parallel_clauses (inner_stmt) : gimple_omp_for_clauses (inner_stmt); tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); startvar = OMP_CLAUSE_DECL (innerc); innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); endvar = OMP_CLAUSE_DECL (innerc); } t = fold_convert (itype, s0); t = fold_build2 (MULT_EXPR, itype, t, step); if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (n1, t); else t = fold_build2 (PLUS_EXPR, type, t, n1); t = fold_convert (TREE_TYPE (startvar), t); t = force_gimple_operand_gsi (&gsi, t, DECL_P (startvar) && TREE_ADDRESSABLE (startvar), NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (startvar, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); t = fold_convert (itype, e0); t = fold_build2 (MULT_EXPR, itype, t, step); if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (n1, t); else t = fold_build2 (PLUS_EXPR, type, t, n1); t = fold_convert (TREE_TYPE (startvar), t); e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); if (endvar) { assign_stmt = gimple_build_assign (endvar, e); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e))) assign_stmt = gimple_build_assign (fd->loop.v, e); else assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } if (fd->collapse > 1) expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar); if (!broken_loop) { /* The code controlling the sequential loop goes in CONT_BB, replacing the GIMPLE_OMP_CONTINUE. 
*/ gsi = gsi_last_bb (cont_bb); gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi)); vmain = gimple_omp_continue_control_use (cont_stmt); vback = gimple_omp_continue_control_def (cont_stmt); if (!gimple_omp_for_combined_p (fd->for_stmt)) { if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (vmain, step); else t = fold_build2 (PLUS_EXPR, type, vmain, step); if (DECL_P (vback) && TREE_ADDRESSABLE (vback)) t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); assign_stmt = gimple_build_assign (vback, t); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); t = build2 (fd->loop.cond_code, boolean_type_node, DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, e); gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT); } /* Remove GIMPLE_OMP_CONTINUE. */ gsi_remove (&gsi, true); if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt)) collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb); /* Trip update code goes into TRIP_UPDATE_BB. */ gsi = gsi_start_bb (trip_update_bb); t = build_int_cst (itype, 1); t = build2 (PLUS_EXPR, itype, trip_main, t); assign_stmt = gimple_build_assign (trip_back, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */ gsi = gsi_last_bb (exit_bb); if (!gimple_omp_return_nowait_p (gsi_stmt (gsi))) { t = gimple_omp_return_lhs (gsi_stmt (gsi)); if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP) gcc_checking_assert (t == NULL_TREE); else gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT); } gsi_remove (&gsi, true); /* Connect the new blocks. 
*/ find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE; find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE; if (!broken_loop) { se = find_edge (cont_bb, body_bb); if (gimple_omp_for_combined_p (fd->for_stmt)) { remove_edge (se); se = NULL; } else if (fd->collapse > 1) { remove_edge (se); se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE); } else se->flags = EDGE_TRUE_VALUE; find_edge (cont_bb, trip_update_bb)->flags = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU; redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb); } if (gimple_in_ssa_p (cfun)) { gphi_iterator psi; gphi *phi; edge re, ene; edge_var_map *vm; size_t i; gcc_assert (fd->collapse == 1 && !broken_loop); /* When we redirect the edge from trip_update_bb to iter_part_bb, we remove arguments of the phi nodes in fin_bb. We need to create appropriate phi nodes in iter_part_bb instead. */ se = single_pred_edge (fin_bb); re = single_succ_edge (trip_update_bb); vec<edge_var_map> *head = redirect_edge_var_map_vector (re); ene = single_succ_edge (entry_bb); psi = gsi_start_phis (fin_bb); for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm); gsi_next (&psi), ++i) { gphi *nphi; source_location locus; phi = psi.phi (); t = gimple_phi_result (phi); gcc_assert (t == redirect_edge_var_map_result (vm)); nphi = create_phi_node (t, iter_part_bb); t = PHI_ARG_DEF_FROM_EDGE (phi, se); locus = gimple_phi_arg_location_from_edge (phi, se); /* A special case -- fd->loop.v is not yet computed in iter_part_bb, we need to use vextra instead. */ if (t == fd->loop.v) t = vextra; add_phi_arg (nphi, t, ene, locus); locus = redirect_edge_var_map_location (vm); add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus); } gcc_assert (gsi_end_p (psi) && i == head->length ()); redirect_edge_var_map_clear (re); while (1) { psi = gsi_start_phis (fin_bb); if (gsi_end_p (psi)) break; remove_phi_node (&psi, false); } /* Make phi node for trip. 
*/ phi = create_phi_node (trip_main, iter_part_bb); add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb), UNKNOWN_LOCATION); add_phi_arg (phi, trip_init, single_succ_edge (entry_bb), UNKNOWN_LOCATION); } if (!broken_loop) set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb); set_immediate_dominator (CDI_DOMINATORS, iter_part_bb, recompute_dominator (CDI_DOMINATORS, iter_part_bb)); set_immediate_dominator (CDI_DOMINATORS, fin_bb, recompute_dominator (CDI_DOMINATORS, fin_bb)); set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, recompute_dominator (CDI_DOMINATORS, seq_start_bb)); set_immediate_dominator (CDI_DOMINATORS, body_bb, recompute_dominator (CDI_DOMINATORS, body_bb)); if (!broken_loop) { struct loop *trip_loop = alloc_loop (); trip_loop->header = iter_part_bb; trip_loop->latch = trip_update_bb; add_loop (trip_loop, iter_part_bb->loop_father); if (!gimple_omp_for_combined_p (fd->for_stmt)) { struct loop *loop = alloc_loop (); loop->header = body_bb; if (collapse_bb == NULL) loop->latch = cont_bb; add_loop (loop, trip_loop); } } } /* A subroutine of expand_omp_for. Generate code for _Cilk_for loop. Given parameters: for (V = N1; V cond N2; V += STEP) BODY; where COND is "<" or ">" or "!=", we generate pseudocode for (ind_var = low; ind_var < high; ind_var++) { V = n1 + (ind_var * STEP) <BODY> } In the above pseudocode, low and high are function parameters of the child function. In the function below, we are inserting a temp. variable that will be making a call to two OMP functions that will not be found in the body of _Cilk_for (since OMP_FOR cannot be mixed with _Cilk_for). These functions are replaced with low and high by the function that handles taskreg. 
*/ static void expand_cilk_for (struct omp_region *region, struct omp_for_data *fd) { bool broken_loop = region->cont == NULL; basic_block entry_bb = region->entry; basic_block cont_bb = region->cont; gcc_assert (EDGE_COUNT (entry_bb->succs) == 2); gcc_assert (broken_loop || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest); basic_block l0_bb = FALLTHRU_EDGE (entry_bb)->dest; basic_block l1_bb, l2_bb; if (!broken_loop) { gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb); gcc_assert (EDGE_COUNT (cont_bb->succs) == 2); l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest; l2_bb = BRANCH_EDGE (entry_bb)->dest; } else { BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL; l1_bb = split_edge (BRANCH_EDGE (entry_bb)); l2_bb = single_succ (l1_bb); } basic_block exit_bb = region->exit; basic_block l2_dom_bb = NULL; gimple_stmt_iterator gsi = gsi_last_bb (entry_bb); /* Below statements until the "tree high_val = ..." are pseudo statements used to pass information to be used by expand_omp_taskreg. low_val and high_val will be replaced by the __low and __high parameter from the child function. The call_exprs part is a place-holder, it is mainly used to distinctly identify to the top-level part that this is where we should put low and high (reasoning given in header comment). */ tree child_fndecl = gimple_omp_parallel_child_fn ( as_a <gomp_parallel *> (last_stmt (region->outer->entry))); tree t, low_val = NULL_TREE, high_val = NULL_TREE; for (t = DECL_ARGUMENTS (child_fndecl); t; t = TREE_CHAIN (t)) { if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__high")) high_val = t; else if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__low")) low_val = t; } gcc_assert (low_val && high_val); tree type = TREE_TYPE (low_val); tree ind_var = create_tmp_reg (type, "__cilk_ind_var"); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); /* Not needed in SSA form right now. 
*/ gcc_assert (!gimple_in_ssa_p (cfun)); if (l2_dom_bb == NULL) l2_dom_bb = l1_bb; tree n1 = low_val; tree n2 = high_val; gimple stmt = gimple_build_assign (ind_var, n1); /* Replace the GIMPLE_OMP_FOR statement. */ gsi_replace (&gsi, stmt, true); if (!broken_loop) { /* Code to control the increment goes in the CONT_BB. */ gsi = gsi_last_bb (cont_bb); stmt = gsi_stmt (gsi); gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE); stmt = gimple_build_assign (ind_var, PLUS_EXPR, ind_var, build_one_cst (type)); /* Replace GIMPLE_OMP_CONTINUE. */ gsi_replace (&gsi, stmt, true); } /* Emit the condition in L1_BB. */ gsi = gsi_after_labels (l1_bb); t = fold_build2 (MULT_EXPR, TREE_TYPE (fd->loop.step), fold_convert (TREE_TYPE (fd->loop.step), ind_var), fd->loop.step); if (POINTER_TYPE_P (TREE_TYPE (fd->loop.n1))) t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (fd->loop.n1), fd->loop.n1, fold_convert (sizetype, t)); else t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loop.n1), fd->loop.n1, fold_convert (TREE_TYPE (fd->loop.n1), t)); t = fold_convert (TREE_TYPE (fd->loop.v), t); expand_omp_build_assign (&gsi, fd->loop.v, t); /* The condition is always '<' since the runtime will fill in the low and high values. */ stmt = gimple_build_cond (LT_EXPR, ind_var, n2, NULL_TREE, NULL_TREE); gsi_insert_before (&gsi, stmt, GSI_SAME_STMT); /* Remove GIMPLE_OMP_RETURN. */ gsi = gsi_last_bb (exit_bb); gsi_remove (&gsi, true); /* Connect the new blocks. 
*/ remove_edge (FALLTHRU_EDGE (entry_bb)); edge e, ne; if (!broken_loop) { remove_edge (BRANCH_EDGE (entry_bb)); make_edge (entry_bb, l1_bb, EDGE_FALLTHRU); e = BRANCH_EDGE (l1_bb); ne = FALLTHRU_EDGE (l1_bb); e->flags = EDGE_TRUE_VALUE; } else { single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU; ne = single_succ_edge (l1_bb); e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE); } ne->flags = EDGE_FALSE_VALUE; e->probability = REG_BR_PROB_BASE * 7 / 8; ne->probability = REG_BR_PROB_BASE / 8; set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb); set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb); set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb); if (!broken_loop) { struct loop *loop = alloc_loop (); loop->header = l1_bb; loop->latch = cont_bb; add_loop (loop, l1_bb->loop_father); loop->safelen = INT_MAX; } /* Pick the correct library function based on the precision of the induction variable type. */ tree lib_fun = NULL_TREE; if (TYPE_PRECISION (type) == 32) lib_fun = cilk_for_32_fndecl; else if (TYPE_PRECISION (type) == 64) lib_fun = cilk_for_64_fndecl; else gcc_unreachable (); gcc_assert (fd->sched_kind == OMP_CLAUSE_SCHEDULE_CILKFOR); /* WS_ARGS contains the library function flavor to call: __libcilkrts_cilk_for_64 or __libcilkrts_cilk_for_32), and the user-defined grain value. If the user does not define one, then zero is passed in by the parser. */ vec_alloc (region->ws_args, 2); region->ws_args->quick_push (lib_fun); region->ws_args->quick_push (fd->chunk_size); } /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing loop. 
Given parameters:
   for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	V = N1;
	goto L1;
    L0:
	BODY;
	V += STEP;
    L1:
	if (V cond N2) goto L0; else goto L2;
    L2:

   For collapsed loops, given parameters:
     collapse(3)
     for (V1 = N11; V1 cond1 N12; V1 += STEP1)
       for (V2 = N21; V2 cond2 N22; V2 += STEP2)
	 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
	   BODY;

   we generate pseudocode

	if (cond3 is <)
	  adj = STEP3 - 1;
	else
	  adj = STEP3 + 1;
	count3 = (adj + N32 - N31) / STEP3;
	if (cond2 is <)
	  adj = STEP2 - 1;
	else
	  adj = STEP2 + 1;
	count2 = (adj + N22 - N21) / STEP2;
	if (cond1 is <)
	  adj = STEP1 - 1;
	else
	  adj = STEP1 + 1;
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
	V = 0;
	V1 = N11;
	V2 = N21;
	V3 = N31;
	goto L1;
    L0:
	BODY;
	V += 1;
	V3 += STEP3;
	V2 += (V3 cond3 N32) ? 0 : STEP2;
	V3 = (V3 cond3 N32) ? V3 : N31;
	V1 += (V2 cond2 N22) ? 0 : STEP1;
	V2 = (V2 cond2 N22) ? V2 : N21;
    L1:
	if (V < count) goto L0; else goto L2;
    L2:

      */

static void
expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
{
  tree type, t;
  basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
  gimple_stmt_iterator gsi;
  gimple stmt;
  gcond *cond_stmt;
  /* A "broken" loop has no GIMPLE_OMP_CONTINUE block, i.e. the body
     never reaches the loop latch.  */
  bool broken_loop = region->cont == NULL;
  edge e, ne;
  tree *counts = NULL;
  int i;
  tree safelen = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
				  OMP_CLAUSE_SAFELEN);
  tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
				  OMP_CLAUSE__SIMDUID_);
  tree n1, n2;

  /* Identify the region's blocks following the L0/L1/L2 naming of the
     pseudocode above: L0 is the loop body, L1 holds the exit condition,
     L2 is the block after the loop.  */
  type = TREE_TYPE (fd->loop.v);
  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (broken_loop
	      || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
  if (!broken_loop)
    {
      gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
      gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
      l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
      l2_bb = BRANCH_EDGE (entry_bb)->dest;
    }
  else
    {
      BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
      l1_bb = split_edge (BRANCH_EDGE (entry_bb));
      l2_bb = single_succ (l1_bb);
    }
  exit_bb = region->exit;
  l2_dom_bb = NULL;

  gsi = gsi_last_bb (entry_bb);

  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
  /* Not needed in SSA form right now.  */
  gcc_assert (!gimple_in_ssa_p (cfun));
  if (fd->collapse > 1)
    {
      int first_zero_iter = -1;
      basic_block zero_iter_bb = l2_bb;

      counts = XALLOCAVEC (tree, fd->collapse);
      expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
				  zero_iter_bb, first_zero_iter,
				  l2_dom_bb);
    }
  if (l2_dom_bb == NULL)
    l2_dom_bb = l1_bb;

  n1 = fd->loop.n1;
  n2 = fd->loop.n2;
  if (gimple_omp_for_combined_into_p (fd->for_stmt))
    {
      /* For a loop combined into an enclosing construct, the bounds live
	 in a pair of _looptemp_ clauses rather than in fd->loop.  */
      tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
				     OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      n1 = OMP_CLAUSE_DECL (innerc);
      innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
				OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      n2 = OMP_CLAUSE_DECL (innerc);
      expand_omp_build_assign (&gsi, fd->loop.v,
			       fold_convert (type, n1));
      if (fd->collapse > 1)
	{
	  gsi_prev (&gsi);
	  expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1);
	  gsi_next (&gsi);
	}
    }
  else
    {
      expand_omp_build_assign (&gsi, fd->loop.v,
			       fold_convert (type, fd->loop.n1));
      if (fd->collapse > 1)
	for (i = 0; i < fd->collapse; i++)
	  {
	    tree itype = TREE_TYPE (fd->loops[i].v);
	    if (POINTER_TYPE_P (itype))
	      itype = signed_type_for (itype);
	    t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
	    expand_omp_build_assign (&gsi, fd->loops[i].v, t);
	  }
      }

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  if (!broken_loop)
    {
      /* Code to control the increment goes in the CONT_BB.  */
      gsi = gsi_last_bb (cont_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);

      if (POINTER_TYPE_P (type))
	t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
      else
	t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
      expand_omp_build_assign (&gsi, fd->loop.v, t);

      if (fd->collapse > 1)
	{
	  /* Step the innermost loop variable, then ripple-carry into the
	     outer variables as shown in the pseudocode above.  */
	  i = fd->collapse - 1;
	  if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
	    {
	      t = fold_convert (sizetype, fd->loops[i].step);
	      t = fold_build_pointer_plus (fd->loops[i].v, t);
	    }
	  else
	    {
	      t = fold_convert (TREE_TYPE (fd->loops[i].v),
				fd->loops[i].step);
	      t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
			       fd->loops[i].v, t);
	    }
	  expand_omp_build_assign (&gsi, fd->loops[i].v, t);

	  for (i = fd->collapse - 1; i > 0; i--)
	    {
	      tree itype = TREE_TYPE (fd->loops[i].v);
	      tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
	      if (POINTER_TYPE_P (itype2))
		itype2 = signed_type_for (itype2);
	      /* V[i-1] += (V[i] cond N2[i]) ? 0 : STEP[i-1];  */
	      t = build3 (COND_EXPR, itype2,
			  build2 (fd->loops[i].cond_code, boolean_type_node,
				  fd->loops[i].v,
				  fold_convert (itype, fd->loops[i].n2)),
			  build_int_cst (itype2, 0),
			  fold_convert (itype2, fd->loops[i - 1].step));
	      if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
		t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
	      else
		t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
	      expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);

	      /* V[i] = (V[i] cond N2[i]) ? V[i] : N1[i];  */
	      t = build3 (COND_EXPR, itype,
			  build2 (fd->loops[i].cond_code, boolean_type_node,
				  fd->loops[i].v,
				  fold_convert (itype, fd->loops[i].n2)),
			  fd->loops[i].v,
			  fold_convert (itype, fd->loops[i].n1));
	      expand_omp_build_assign (&gsi, fd->loops[i].v, t);
	    }
	}

      /* Remove GIMPLE_OMP_CONTINUE.  */
      gsi_remove (&gsi, true);
    }

  /* Emit the condition in L1_BB.  */
  gsi = gsi_start_bb (l1_bb);

  t = fold_convert (type, n2);
  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				false, GSI_CONTINUE_LINKING);
  t = build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t);
  cond_stmt = gimple_build_cond_empty (t);
  gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
  if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p,
		 NULL, NULL)
      || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p,
		    NULL, NULL))
    {
      gsi = gsi_for_stmt (cond_stmt);
      gimple_regimplify_operands (cond_stmt, &gsi);
    }

  /* Remove GIMPLE_OMP_RETURN.  */
  gsi = gsi_last_bb (exit_bb);
  gsi_remove (&gsi, true);

  /* Connect the new blocks.  */
  remove_edge (FALLTHRU_EDGE (entry_bb));

  if (!broken_loop)
    {
      remove_edge (BRANCH_EDGE (entry_bb));
      make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);

      e = BRANCH_EDGE (l1_bb);
      ne = FALLTHRU_EDGE (l1_bb);
      e->flags = EDGE_TRUE_VALUE;
    }
  else
    {
      single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

      ne = single_succ_edge (l1_bb);
      e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);

    }

  /* Predict the back edge taken 7 times out of 8.  */
  ne->flags = EDGE_FALSE_VALUE;
  e->probability = REG_BR_PROB_BASE * 7 / 8;
  ne->probability = REG_BR_PROB_BASE / 8;

  set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
  set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);

  if (!broken_loop)
    {
      /* Register the natural loop and record the vectorization hints
	 (safelen, simduid) coming from the OMP clauses.  */
      struct loop *loop = alloc_loop ();
      loop->header = l1_bb;
      loop->latch = cont_bb;
      add_loop (loop, l1_bb->loop_father);
      if (safelen == NULL_TREE)
	loop->safelen = INT_MAX;
      else
	{
	  safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
	  if (TREE_CODE (safelen) != INTEGER_CST)
	    loop->safelen = 0;
	  else if (!tree_fits_uhwi_p (safelen)
		   || tree_to_uhwi (safelen) > INT_MAX)
	    loop->safelen = INT_MAX;
	  else
	    loop->safelen = tree_to_uhwi (safelen);
	  /* safelen == 1 leaves no room for vectorization; drop it.  */
	  if (loop->safelen == 1)
	    loop->safelen = 0;
	}
      if (simduid)
	{
	  loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
	  cfun->has_simduid_loops = true;
	}
      /* If not -fno-tree-loop-vectorize, hint that we want to vectorize
	 the loop.  */
      if ((flag_tree_loop_vectorize
	   || (!global_options_set.x_flag_tree_loop_vectorize
	       && !global_options_set.x_flag_tree_vectorize))
	  && flag_tree_loop_optimize
	  && loop->safelen > 1)
	{
	  loop->force_vectorize = true;
	  cfun->has_force_vectorize_loops = true;
	}
    }
}

/* Expand the OMP loop defined by REGION.  Dispatches to the expander
   matching the loop kind (simd, cilk_for, static schedule) and falls
   back to the generic libgomp-driven expansion otherwise.  */

static void
expand_omp_for (struct omp_region *region, gimple inner_stmt)
{
  struct omp_for_data fd;
  struct omp_for_data_loop *loops;

  loops
    = (struct omp_for_data_loop *)
      alloca (gimple_omp_for_collapse (last_stmt (region->entry))
	      * sizeof (struct omp_for_data_loop));
  extract_omp_for_data (as_a <gomp_for *> (last_stmt (region->entry)),
			&fd, loops);
  region->sched_kind = fd.sched_kind;

  gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
  BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  if (region->cont)
    {
      gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
      BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
      FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
    }
  else
    /* If there isn't a continue then this is a degenerate case where
       the introduction of abnormal edges during lowering will prevent
       original loops from being detected.  Fix that up.  */
    loops_state_set (LOOPS_NEED_FIXUP);

  if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_SIMD)
    expand_omp_simd (region, &fd);
  else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
    expand_cilk_for (region, &fd);
  else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
	   && !fd.have_ordered)
    {
      if (fd.chunk_size == NULL)
	expand_omp_for_static_nochunk (region, &fd, inner_stmt);
      else
	expand_omp_for_static_chunk (region, &fd, inner_stmt);
    }
  else
    {
      /* Select the GOMP_loop_*_start/next builtin pair from the schedule
	 kind, orderedness, and (for unsigned long long) iteration type.  */
      int fn_index, start_ix, next_ix;

      gcc_assert (gimple_omp_for_kind (fd.for_stmt)
		  == GF_OMP_FOR_KIND_FOR);
      if (fd.chunk_size == NULL
	  && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
	fd.chunk_size = integer_zero_node;
      gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
      fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
		  ? 3 : fd.sched_kind;
      fn_index += fd.have_ordered * 4;
      start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
      next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
      if (fd.iter_type == long_long_unsigned_type_node)
	{
	  start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
			- (int)BUILT_IN_GOMP_LOOP_STATIC_START);
	  next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
		      - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
	}
      expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
			      (enum built_in_function) next_ix, inner_stmt);
    }

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_only_virtuals);
}

/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

	v = GOMP_sections_start (n);
    L0:
	switch (v)
	  {
	  case 0:
	    goto L2;
	  case 1:
	    section 1;
	    goto L1;
	  case 2:
	    ...
	  case n:
	    ...
	  default:
	    abort ();
	  }
    L1:
	v = GOMP_sections_next ();
	goto L0;
    L2:
	reduction;

    If this is a combined parallel sections, replace the call to
    GOMP_sections_start with call to GOMP_sections_next.
*/

static void
expand_omp_sections (struct omp_region *region)
{
  tree t, u, vin = NULL, vmain, vnext, l2;
  unsigned len;
  basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
  gimple_stmt_iterator si, switch_si;
  gomp_sections *sections_stmt;
  gimple stmt;
  gomp_continue *cont;
  edge_iterator ei;
  edge e;
  struct omp_region *inner;
  unsigned i, casei;
  bool exit_reachable = region->cont != NULL;

  gcc_assert (region->exit != NULL);
  entry_bb = region->entry;
  l0_bb = single_succ (entry_bb);
  l1_bb = region->cont;
  l2_bb = region->exit;
  /* Find the label L2 (end of more work) that the '0' switch case jumps
     to; with reductions the exit block is not a direct successor.  */
  if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
    l2 = gimple_block_label (l2_bb);
  else
    {
      /* This can happen if there are reductions.  */
      len = EDGE_COUNT (l0_bb->succs);
      gcc_assert (len > 0);
      e = EDGE_SUCC (l0_bb, len - 1);
      si = gsi_last_bb (e->dest);
      l2 = NULL_TREE;
      if (gsi_end_p (si)
	  || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
	l2 = gimple_block_label (e->dest);
      else
	FOR_EACH_EDGE (e, ei, l0_bb->succs)
	  {
	    si = gsi_last_bb (e->dest);
	    if (gsi_end_p (si)
		|| gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
	      {
		l2 = gimple_block_label (e->dest);
		break;
	      }
	  }
    }
  if (exit_reachable)
    default_bb = create_empty_bb (l1_bb->prev_bb);
  else
    default_bb = create_empty_bb (l0_bb);

  /* We will build a switch() with enough cases for all the
     GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
     and a default case to abort if something goes wrong.  */
  len = EDGE_COUNT (l0_bb->succs);

  /* Use vec::quick_push on label_vec throughout, since we know the size
     in advance.  */
  auto_vec<tree> label_vec (len);

  /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
     GIMPLE_OMP_SECTIONS statement.  */
  si = gsi_last_bb (entry_bb);
  sections_stmt = as_a <gomp_sections *> (gsi_stmt (si));
  gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
  vin = gimple_omp_sections_control (sections_stmt);
  if (!is_combined_parallel (region))
    {
      /* If we are not inside a combined parallel+sections region,
	 call GOMP_sections_start.  */
      t = build_int_cst (unsigned_type_node, len - 1);
      u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
      stmt = gimple_build_call (u, 1, t);
    }
  else
    {
      /* Otherwise, call GOMP_sections_next.  */
      u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
      stmt = gimple_build_call (u, 0);
    }
  gimple_call_set_lhs (stmt, vin);
  gsi_insert_after (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
     L0_BB.  */
  switch_si = gsi_last_bb (l0_bb);
  gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
  if (exit_reachable)
    {
      cont = as_a <gomp_continue *> (last_stmt (l1_bb));
      gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (cont);
      vnext = gimple_omp_continue_control_def (cont);
    }
  else
    {
      vmain = vin;
      vnext = NULL_TREE;
    }

  /* Case 0 jumps to L2 (no more work).  */
  t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
  label_vec.quick_push (t);
  i = 1;

  /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR.
     I counts the labels consumed, CASEI the case value assigned;
     a skipped reduction region consumes neither.  */
  for (inner = region->inner, casei = 1;
       inner;
       inner = inner->next, i++, casei++)
    {
      basic_block s_entry_bb, s_exit_bb;

      /* Skip optional reduction region.  */
      if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
	{
	  --i;
	  --casei;
	  continue;
	}

      s_entry_bb = inner->entry;
      s_exit_bb = inner->exit;

      t = gimple_block_label (s_entry_bb);
      u = build_int_cst (unsigned_type_node, casei);
      u = build_case_label (u, NULL, t);
      label_vec.quick_push (u);

      si = gsi_last_bb (s_entry_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
      gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
      gsi_remove (&si, true);
      single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;

      if (s_exit_bb == NULL)
	continue;

      si = gsi_last_bb (s_exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);

      single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
    }

  /* Error handling code goes in DEFAULT_BB.  */
  t = gimple_block_label (default_bb);
  u = build_case_label (NULL, NULL, t);
  make_edge (l0_bb, default_bb, 0);
  add_bb_to_loop (default_bb, current_loops->tree_root);

  stmt = gimple_build_switch (vmain, u, label_vec);
  gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
  gsi_remove (&switch_si, true);

  /* An unexpected section number traps.  */
  si = gsi_start_bb (default_bb);
  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  if (exit_reachable)
    {
      tree bfn_decl;

      /* Code to get the next section goes in L1_BB.  */
      si = gsi_last_bb (l1_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);

      bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
      stmt = gimple_build_call (bfn_decl, 0);
      gimple_call_set_lhs (stmt, vnext);
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
      gsi_remove (&si, true);

      single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
    }

  /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB.  */
  si = gsi_last_bb (l2_bb);
  if (gimple_omp_return_nowait_p (gsi_stmt (si)))
    t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
  else if (gimple_omp_return_lhs (gsi_stmt (si)))
    t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
  else
    t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
  stmt = gimple_build_call (t, 0);
  if (gimple_omp_return_lhs (gsi_stmt (si)))
    gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
  gsi_insert_after (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}

/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code, here we simply place the GOMP_barrier call.
*/ static void expand_omp_single (struct omp_region *region) { basic_block entry_bb, exit_bb; gimple_stmt_iterator si; entry_bb = region->entry; exit_bb = region->exit; si = gsi_last_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE); gsi_remove (&si, true); single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU; si = gsi_last_bb (exit_bb); if (!gimple_omp_return_nowait_p (gsi_stmt (si))) { tree t = gimple_omp_return_lhs (gsi_stmt (si)); gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT); } gsi_remove (&si, true); single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU; } /* Generic expansion for OpenMP synchronization directives: master, ordered and critical. All we need to do here is remove the entry and exit markers for REGION. */ static void expand_omp_synch (struct omp_region *region) { basic_block entry_bb, exit_bb; gimple_stmt_iterator si; entry_bb = region->entry; exit_bb = region->exit; si = gsi_last_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS); gsi_remove (&si, true); single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU; if (exit_bb) { si = gsi_last_bb (exit_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN); gsi_remove (&si, true); single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU; } } /* A subroutine of expand_omp_atomic. Attempt to implement the atomic operation as a normal volatile load. 
*/ static bool expand_omp_atomic_load (basic_block load_bb, tree addr, tree loaded_val, int index) { enum built_in_function tmpbase; gimple_stmt_iterator gsi; basic_block store_bb; location_t loc; gimple stmt; tree decl, call, type, itype; gsi = gsi_last_bb (load_bb); stmt = gsi_stmt (gsi); gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD); loc = gimple_location (stmt); /* ??? If the target does not implement atomic_load_optab[mode], and mode is smaller than word size, then expand_atomic_load assumes that the load is atomic. We could avoid the builtin entirely in this case. */ tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1); decl = builtin_decl_explicit (tmpbase); if (decl == NULL_TREE) return false; type = TREE_TYPE (loaded_val); itype = TREE_TYPE (TREE_TYPE (decl)); call = build_call_expr_loc (loc, decl, 2, addr, build_int_cst (NULL, gimple_omp_atomic_seq_cst_p (stmt) ? MEMMODEL_SEQ_CST : MEMMODEL_RELAXED)); if (!useless_type_conversion_p (type, itype)) call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call); call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call); force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT); gsi_remove (&gsi, true); store_bb = single_succ (load_bb); gsi = gsi_last_bb (store_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE); gsi_remove (&gsi, true); if (gimple_in_ssa_p (cfun)) update_ssa (TODO_update_ssa_no_phi); return true; } /* A subroutine of expand_omp_atomic. Attempt to implement the atomic operation as a normal volatile store. 
*/

static bool
expand_omp_atomic_store (basic_block load_bb, tree addr,
			 tree loaded_val, tree stored_val, int index)
{
  enum built_in_function tmpbase;
  gimple_stmt_iterator gsi;
  basic_block store_bb = single_succ (load_bb);
  location_t loc;
  gimple stmt;
  tree decl, call, type, itype;
  machine_mode imode;
  bool exchange;

  gsi = gsi_last_bb (load_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);

  /* If the load value is needed, then this isn't a store but an exchange.  */
  exchange = gimple_omp_atomic_need_value_p (stmt);

  gsi = gsi_last_bb (store_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
  loc = gimple_location (stmt);

  /* ??? If the target does not implement atomic_store_optab[mode], and mode
     is smaller than word size, then expand_atomic_store assumes that the store
     is atomic.  We could avoid the builtin entirely in this case.  */

  tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
  tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;

  type = TREE_TYPE (stored_val);

  /* Dig out the type of the function's second argument.  */
  itype = TREE_TYPE (decl);
  itype = TYPE_ARG_TYPES (itype);
  itype = TREE_CHAIN (itype);
  itype = TREE_VALUE (itype);
  imode = TYPE_MODE (itype);

  if (exchange && !can_atomic_exchange_p (imode, true))
    return false;

  if (!useless_type_conversion_p (itype, type))
    stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
  call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
			      build_int_cst (NULL,
					     gimple_omp_atomic_seq_cst_p (stmt)
					     ? MEMMODEL_SEQ_CST
					     : MEMMODEL_RELAXED));
  if (exchange)
    {
      /* The exchange builtin returns the old value; assign it to
	 LOADED_VAL.  */
      if (!useless_type_conversion_p (type, itype))
	call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
      call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
    }

  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above.  */
  gsi = gsi_last_bb (load_bb);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}

/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a __atomic_fetch_op builtin.  INDEX is log2 of the
   size of the data type, and thus usable to find the index of the builtin
   decl.  Returns false if the expression is not of the proper form.  */

static bool
expand_omp_atomic_fetch_op (basic_block load_bb,
			    tree addr, tree loaded_val,
			    tree stored_val, int index)
{
  enum built_in_function oldbase, newbase, tmpbase;
  tree decl, itype, call;
  tree lhs, rhs;
  basic_block store_bb = single_succ (load_bb);
  gimple_stmt_iterator gsi;
  gimple stmt;
  location_t loc;
  enum tree_code code;
  bool need_old, need_new;
  machine_mode imode;
  bool seq_cst;

  /* We expect to find the following sequences:

   load_bb:
       GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)

   store_bb:
       val = tmp OP something; (or: something OP tmp)
       GIMPLE_OMP_STORE (val)

  ???FIXME: Allow a more flexible sequence.
  Perhaps use data flow to pick the statements.

  */

  gsi = gsi_after_labels (store_bb);
  stmt = gsi_stmt (gsi);
  loc = gimple_location (stmt);
  if (!is_gimple_assign (stmt))
    return false;
  gsi_next (&gsi);
  if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
    return false;
  need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
  need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
  seq_cst = gimple_omp_atomic_seq_cst_p (last_stmt (load_bb));
  gcc_checking_assert (!need_old || !need_new);

  if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
    return false;

  /* Check for one of the supported fetch-op operations.  */
  code = gimple_assign_rhs_code (stmt);
  switch (code)
    {
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
      newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
      break;
    case MINUS_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
      newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
      break;
    case BIT_AND_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
      newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
      break;
    case BIT_IOR_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
      newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
      break;
    case BIT_XOR_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
      newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
      break;
    default:
      return false;
    }

  /* Make sure the expression is of the proper form.  */
  if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs2 (stmt);
  else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
	   && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs1 (stmt);
  else
    return false;

  tmpbase = ((enum built_in_function)
	     ((need_new ? newbase : oldbase) + index + 1));
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;
  itype = TREE_TYPE (TREE_TYPE (decl));
  imode = TYPE_MODE (itype);

  /* We could test all of the various optabs involved, but the fact of the
     matter is that (with the exception of i486 vs i586 and xadd) all targets
     that support any atomic operation optab also implements compare-and-swap.
     Let optabs.c take care of expanding any compare-and-swap loop.  */
  if (!can_compare_and_swap_p (imode, true))
    return false;

  gsi = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);

  /* OpenMP does not imply any barrier-like semantics on its atomic ops.
     It only requires that the operation happen atomically.  Thus we can
     use the RELAXED memory model.  */
  call = build_call_expr_loc (loc, decl, 3, addr,
			      fold_convert_loc (loc, itype, rhs),
			      build_int_cst (NULL,
					     seq_cst ? MEMMODEL_SEQ_CST
						     : MEMMODEL_RELAXED));

  if (need_old || need_new)
    {
      lhs = need_old ? loaded_val : stored_val;
      call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
      call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
    }
  else
    call = fold_convert_loc (loc, void_type_node, call);
  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Remove the GIMPLE_OMP_ATOMIC_STORE, then the fetch-op assignment
     that is now the last statement of STORE_BB (both were verified
     above).  */
  gsi = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
  gsi_remove (&gsi, true);
  gsi = gsi_last_bb (store_bb);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}

/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      oldval = *addr;
      repeat:
	newval = rhs;	 // with oldval replacing *addr in rhs
	oldval = __sync_val_compare_and_swap (addr, oldval, newval);
	if (oldval != newval)
	  goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.  */

static bool
expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
			    tree addr, tree loaded_val, tree stored_val,
			    int index)
{
  tree loadedi, storedi, initial, new_storedi, old_vali;
  tree type, itype, cmpxchg, iaddr;
  gimple_stmt_iterator si;
  basic_block loop_header = single_succ (load_bb);
  gimple phi, stmt;
  edge e;
  enum built_in_function fncode;

  /* ??? We need a non-pointer interface to __atomic_compare_exchange in
     order to use the RELAXED memory model effectively.  */
  fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
				    + index + 1);
  cmpxchg = builtin_decl_explicit (fncode);
  if (cmpxchg == NULL_TREE)
    return false;
  type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  itype = TREE_TYPE (TREE_TYPE (cmpxchg));

  if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
    return false;

  /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD.  */
  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  /* For floating-point values, we'll need to view-convert them to integers
     so that we can perform the atomic compare and swap.  Simplify the
     following code by always setting up the "i"ntegral variables.  */
  if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
    {
      tree iaddr_val;

      iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
							   true));
      iaddr_val
	= force_gimple_operand_gsi (&si,
				    fold_convert (TREE_TYPE (iaddr), addr),
				    false, NULL_TREE, true, GSI_SAME_STMT);
      stmt = gimple_build_assign (iaddr, iaddr_val);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
      loadedi = create_tmp_var (itype);
      if (gimple_in_ssa_p (cfun))
	loadedi = make_ssa_name (loadedi);
    }
  else
    {
      iaddr = addr;
      loadedi = loaded_val;
    }

  /* Read the initial value; prefer the sized __atomic_load builtin when
     the target provides one, else a plain MEM_REF.  */
  fncode = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
  tree loaddecl = builtin_decl_explicit (fncode);
  if (loaddecl)
    initial
      = fold_convert (TREE_TYPE (TREE_TYPE (iaddr)),
		      build_call_expr (loaddecl, 2, iaddr,
				       build_int_cst (NULL_TREE,
						      MEMMODEL_RELAXED)));
  else
    initial = build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)), iaddr,
		      build_int_cst (TREE_TYPE (iaddr), 0));

  initial
    = force_gimple_operand_gsi (&si, initial, true, NULL_TREE, true,
				GSI_SAME_STMT);

  /* Move the value to the LOADEDI temporary.  */
  if (gimple_in_ssa_p (cfun))
    {
      gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
      phi = create_phi_node (loadedi, loop_header);
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
	       initial);
    }
  else
    gsi_insert_before (&si,
		       gimple_build_assign (loadedi, initial),
		       GSI_SAME_STMT);
  if (loadedi != loaded_val)
    {
      /* View-convert the integral LOADEDI back to the user-visible
	 LOADED_VAL at the top of the retry loop.  */
      gimple_stmt_iterator gsi2;
      tree x;

      x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
      gsi2 = gsi_start_bb (loop_header);
      if (gimple_in_ssa_p (cfun))
	{
	  gassign *stmt;
	  x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
					true, GSI_SAME_STMT);
	  stmt = gimple_build_assign (loaded_val, x);
	  gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
	}
      else
	{
	  x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
	  force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
				    true, GSI_SAME_STMT);
	}
    }
  gsi_remove (&si, true);

  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  if (iaddr == addr)
    storedi = stored_val;
  else
    storedi =
      force_gimple_operand_gsi (&si,
				build1 (VIEW_CONVERT_EXPR, itype,
					stored_val), true, NULL_TREE, true,
				GSI_SAME_STMT);

  /* Build the compare&swap statement.  */
  new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
  new_storedi = force_gimple_operand_gsi (&si,
					  fold_convert (TREE_TYPE (loadedi),
							new_storedi),
					  true, NULL_TREE,
					  true, GSI_SAME_STMT);

  if (gimple_in_ssa_p (cfun))
    old_vali = loadedi;
  else
    {
      old_vali = create_tmp_var (TREE_TYPE (loadedi));
      stmt = gimple_build_assign (old_vali, loadedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);

      stmt = gimple_build_assign (loadedi, new_storedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
    }

  /* Note that we always perform the comparison as an integer, even for
     floating point.  This allows the atomic operation to properly
     succeed even with NaNs and -0.0.  */
  stmt = gimple_build_cond_empty
	   (build2 (NE_EXPR, boolean_type_node,
		    new_storedi, old_vali));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  /* Update cfg.  */
  e = single_succ_edge (store_bb);
  e->flags &= ~EDGE_FALLTHRU;
  e->flags |= EDGE_FALSE_VALUE;

  /* The true edge of the CAS-failed condition loops back to the header.  */
  e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);

  /* Copy the new value to loadedi (we already did that before the condition
     if we are not in SSA).  */
  if (gimple_in_ssa_p (cfun))
    {
      phi = gimple_seq_first_stmt (phi_nodes (loop_header));
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
    }

  /* Remove GIMPLE_OMP_ATOMIC_STORE.  */
  gsi_remove (&si, true);

  /* Register the retry loop in the loop tree.  */
  struct loop *loop = alloc_loop ();
  loop->header = loop_header;
  loop->latch = store_bb;
  add_loop (loop, loop_header->loop_father);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}

/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

				  GOMP_atomic_start ();
				  *addr = rhs;
				  GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, appears to be within spec.
   Which makes sense, since that's how several other compilers handle
   this situation as well.
   LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
   expanding.  STORED_VAL is the operand of the matching
   GIMPLE_OMP_ATOMIC_STORE.
We replace GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with loaded_val = *addr; and replace GIMPLE_OMP_ATOMIC_STORE (stored_val) with *addr = stored_val; */ static bool expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb, tree addr, tree loaded_val, tree stored_val) { gimple_stmt_iterator si; gassign *stmt; tree t; si = gsi_last_bb (load_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD); t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START); t = build_call_expr (t, 0); force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT); stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr)); gsi_insert_before (&si, stmt, GSI_SAME_STMT); gsi_remove (&si, true); si = gsi_last_bb (store_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE); stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)), stored_val); gsi_insert_before (&si, stmt, GSI_SAME_STMT); t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END); t = build_call_expr (t, 0); force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT); gsi_remove (&si, true); if (gimple_in_ssa_p (cfun)) update_ssa (TODO_update_ssa_no_phi); return true; } /* Expand an GIMPLE_OMP_ATOMIC statement. We try to expand using expand_omp_atomic_fetch_op. If it failed, we try to call expand_omp_atomic_pipeline, and if it fails too, the ultimate fallback is wrapping the operation in a mutex (expand_omp_atomic_mutex). REGION is the atomic region built by build_omp_regions_1(). 
*/

static void
expand_omp_atomic (struct omp_region *region)
{
  basic_block load_bb = region->entry, store_bb = region->exit;
  gomp_atomic_load *load = as_a <gomp_atomic_load *> (last_stmt (load_bb));
  gomp_atomic_store *store = as_a <gomp_atomic_store *> (last_stmt (store_bb));
  tree loaded_val = gimple_omp_atomic_load_lhs (load);
  tree addr = gimple_omp_atomic_load_rhs (load);
  tree stored_val = gimple_omp_atomic_store_val (store);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  HOST_WIDE_INT index;

  /* Make sure the type is one of the supported sizes.  INDEX is the
     log2 of the byte size, so 0..4 covers objects of 1..16 bytes.  */
  index = tree_to_uhwi (TYPE_SIZE_UNIT (type));
  index = exact_log2 (index);
  if (index >= 0 && index <= 4)
    {
      unsigned int align = TYPE_ALIGN_UNIT (type);

      /* __sync builtins require strict data alignment.  */
      if (exact_log2 (align) >= index)
	{
	  /* Atomic load.  */
	  if (loaded_val == stored_val
	      && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
		  || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
	      && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
	      && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
	    return;

	  /* Atomic store.  */
	  if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
	       || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
	      && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
	      && store_bb == single_succ (load_bb)
	      && first_stmt (store_bb) == store
	      && expand_omp_atomic_store (load_bb, addr, loaded_val,
					  stored_val, index))
	    return;

	  /* When possible, use specialized atomic update functions.  */
	  if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
	      && store_bb == single_succ (load_bb)
	      && expand_omp_atomic_fetch_op (load_bb, addr,
					     loaded_val, stored_val, index))
	    return;

	  /* If we don't have specialized __sync builtins, try and implement
	     as a compare and swap loop.  */
	  if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
					  loaded_val, stored_val, index))
	    return;
	}
    }

  /* The ultimate fallback is wrapping the operation in a mutex.  */
  expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
}

/* Expand the GIMPLE_OMP_TARGET starting at REGION.  */

static void
expand_omp_target (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t;
  gimple_stmt_iterator gsi;
  gomp_target *entry_stmt;
  gimple stmt;
  edge e;
  bool offloaded, data_region;

  entry_stmt = as_a <gomp_target *> (last_stmt (region->entry));
  new_bb = region->entry;

  /* Classify the directive: offloaded regions get outlined into a child
     function; data regions only emit mapping library calls.  */
  offloaded = is_gimple_omp_offloaded (entry_stmt);
  switch (gimple_omp_target_kind (entry_stmt))
    {
    case GF_OMP_TARGET_KIND_REGION:
    case GF_OMP_TARGET_KIND_UPDATE:
    case GF_OMP_TARGET_KIND_OACC_PARALLEL:
    case GF_OMP_TARGET_KIND_OACC_KERNELS:
    case GF_OMP_TARGET_KIND_OACC_UPDATE:
    case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
      data_region = false;
      break;
    case GF_OMP_TARGET_KIND_DATA:
    case GF_OMP_TARGET_KIND_OACC_DATA:
      data_region = true;
      break;
    default:
      gcc_unreachable ();
    }

  child_fn = NULL_TREE;
  child_cfun = NULL;
  if (offloaded)
    {
      child_fn = gimple_omp_target_child_fn (entry_stmt);
      child_cfun = DECL_STRUCT_FUNCTION (child_fn);
    }

  /* Supported by expand_omp_taskreg, but not here.  */
  if (child_cfun != NULL)
    gcc_checking_assert (!child_cfun->cfg);
  gcc_checking_assert (!gimple_in_ssa_p (cfun));

  entry_bb = region->entry;
  exit_bb = region->exit;

  if (offloaded)
    {
      unsigned srcidx, dstidx, num;

      /* If the offloading region needs data sent from the parent
	 function, then the very first statement (except possible
	 tree profile counter updates) of the offloading body
	 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
	 &.OMP_DATA_O is passed as an argument to the child function,
	 we need to replace it with the argument as seen by the child
	 function.

	 In most cases, this will end up being the identity assignment
	 .OMP_DATA_I = .OMP_DATA_I.  However, if the offloading body had
	 a function call that has been inlined, the original PARM_DECL
	 .OMP_DATA_I may have been converted into a different local
	 variable.  In which case, we need to keep the assignment.  */
      tree data_arg = gimple_omp_target_data_arg (entry_stmt);
      if (data_arg)
	{
	  basic_block entry_succ_bb = single_succ (entry_bb);
	  gimple_stmt_iterator gsi;
	  tree arg;
	  gimple tgtcopy_stmt = NULL;
	  tree sender = TREE_VEC_ELT (data_arg, 0);

	  for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
	    {
	      gcc_assert (!gsi_end_p (gsi));
	      stmt = gsi_stmt (gsi);
	      if (gimple_code (stmt) != GIMPLE_ASSIGN)
		continue;

	      if (gimple_num_ops (stmt) == 2)
		{
		  tree arg = gimple_assign_rhs1 (stmt);

		  /* We're ignoring the subcode because we're
		     effectively doing a STRIP_NOPS.  */

		  if (TREE_CODE (arg) == ADDR_EXPR
		      && TREE_OPERAND (arg, 0) == sender)
		    {
		      tgtcopy_stmt = stmt;
		      break;
		    }
		}
	    }

	  gcc_assert (tgtcopy_stmt != NULL);
	  arg = DECL_ARGUMENTS (child_fn);

	  gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg);
	  gsi_remove (&gsi, true);
	}

      /* Declare local variables needed in CHILD_CFUN.  */
      block = DECL_INITIAL (child_fn);
      BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
      /* The gimplifier could record temporaries in the offloading block
	 rather than in containing function's local_decls chain,
	 which would mean cgraph missed finalizing them.  Do it now.  */
      for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
	if (TREE_CODE (t) == VAR_DECL
	    && TREE_STATIC (t)
	    && !DECL_EXTERNAL (t))
	  varpool_node::finalize_decl (t);
      DECL_SAVED_TREE (child_fn) = NULL;
      /* We'll create a CFG for child_fn, so no gimple body is needed.  */
      gimple_set_body (child_fn, NULL);
      TREE_USED (block) = 1;

      /* Reset DECL_CONTEXT on function arguments.  */
      for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
	DECL_CONTEXT (t) = child_fn;

      /* Split ENTRY_BB at GIMPLE_*,
	 so that it can be moved to the child function.  */
      gsi = gsi_last_bb (entry_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (stmt
		  && gimple_code (stmt) == gimple_code (entry_stmt));
      e = split_block (entry_bb, stmt);
      gsi_remove (&gsi, true);
      entry_bb = e->dest;
      single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

      /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR.  */
      if (exit_bb)
	{
	  gsi = gsi_last_bb (exit_bb);
	  gcc_assert (!gsi_end_p (gsi)
		      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
	  stmt = gimple_build_return (NULL);
	  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
	  gsi_remove (&gsi, true);
	}

      /* Move the offloading region into CHILD_CFUN.  */

      block = gimple_block (entry_stmt);

      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
      if (exit_bb)
	single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
      /* When the OMP expansion process cannot guarantee an up-to-date
	 loop tree arrange for the child function to fixup loops.  */
      if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
	child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;

      /* Remove non-local VAR_DECLs from child_cfun->local_decls list.  */
      num = vec_safe_length (child_cfun->local_decls);
      for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
	{
	  t = (*child_cfun->local_decls)[srcidx];
	  if (DECL_CONTEXT (t) == cfun->decl)
	    continue;
	  if (srcidx != dstidx)
	    (*child_cfun->local_decls)[dstidx] = t;
	  dstidx++;
	}
      if (dstidx != num)
	vec_safe_truncate (child_cfun->local_decls, dstidx);

      /* Inform the callgraph about the new function.  */
      DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
      cgraph_node::add_new_function (child_fn, true);

#ifdef ENABLE_OFFLOADING
      /* Add the new function to the offload table.  */
      vec_safe_push (offload_funcs, child_fn);
#endif

      /* Fix the callgraph edges for child_cfun.  Those for cfun will be
	 fixed in a following pass.  */
      push_cfun (child_cfun);
      cgraph_edge::rebuild_edges ();

#ifdef ENABLE_OFFLOADING
      /* Prevent IPA from removing child_fn as unreachable, since there are no
	 refs from the parent function to child_fn in offload LTO mode.  */
      struct cgraph_node *node = cgraph_node::get (child_fn);
      node->mark_force_output ();
#endif

      /* Some EH regions might become dead, see PR34608.  If
	 pass_cleanup_cfg isn't the first pass to happen with the
	 new child, these dead EH edges might cause problems.
	 Clean them up now.  */
      if (flag_exceptions)
	{
	  basic_block bb;
	  bool changed = false;

	  FOR_EACH_BB_FN (bb, cfun)
	    changed |= gimple_purge_dead_eh_edges (bb);
	  if (changed)
	    cleanup_tree_cfg ();
	}
      pop_cfun ();
    }

  /* Emit a library call to launch the offloading region, or do data
     transfers.  */
  tree t1, t2, t3, t4, device, cond, c, clauses;
  enum built_in_function start_ix;
  location_t clause_loc;

  /* Pick the libgomp/libgoacc entry point that matches the directive.  */
  switch (gimple_omp_target_kind (entry_stmt))
    {
    case GF_OMP_TARGET_KIND_REGION:
      start_ix = BUILT_IN_GOMP_TARGET;
      break;
    case GF_OMP_TARGET_KIND_DATA:
      start_ix = BUILT_IN_GOMP_TARGET_DATA;
      break;
    case GF_OMP_TARGET_KIND_UPDATE:
      start_ix = BUILT_IN_GOMP_TARGET_UPDATE;
      break;
    case GF_OMP_TARGET_KIND_OACC_PARALLEL:
    case GF_OMP_TARGET_KIND_OACC_KERNELS:
      start_ix = BUILT_IN_GOACC_PARALLEL;
      break;
    case GF_OMP_TARGET_KIND_OACC_DATA:
      start_ix = BUILT_IN_GOACC_DATA_START;
      break;
    case GF_OMP_TARGET_KIND_OACC_UPDATE:
      start_ix = BUILT_IN_GOACC_UPDATE;
      break;
    case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
      start_ix = BUILT_IN_GOACC_ENTER_EXIT_DATA;
      break;
    default:
      gcc_unreachable ();
    }

  clauses = gimple_omp_target_clauses (entry_stmt);

  /* By default, the value of DEVICE is GOMP_DEVICE_ICV (let runtime
     library choose) and there is no conditional.  */
  cond = NULL_TREE;
  device = build_int_cst (integer_type_node, GOMP_DEVICE_ICV);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = OMP_CLAUSE_IF_EXPR (c);

  c = find_omp_clause (clauses, OMP_CLAUSE_DEVICE);
  if (c)
    {
      /* Even if we pass it to all library function calls, it is currently only
	 defined/used for the OpenMP target ones.  */
      gcc_checking_assert (start_ix == BUILT_IN_GOMP_TARGET
			   || start_ix == BUILT_IN_GOMP_TARGET_DATA
			   || start_ix == BUILT_IN_GOMP_TARGET_UPDATE);

      device = OMP_CLAUSE_DEVICE_ID (c);
      clause_loc = OMP_CLAUSE_LOCATION (c);
    }
  else
    clause_loc = gimple_location (entry_stmt);

  /* Ensure 'device' is of the correct type.  */
  device = fold_convert_loc (clause_loc, integer_type_node, device);

  /* If we found the clause 'if (cond)', build
     (cond ? device : GOMP_DEVICE_HOST_FALLBACK).  */
  if (cond)
    {
      cond = gimple_boolify (cond);

      basic_block cond_bb, then_bb, else_bb;
      edge e;
      tree tmp_var;

      tmp_var = create_tmp_var (TREE_TYPE (device));
      if (offloaded)
	e = split_block (new_bb, NULL);
      else
	{
	  gsi = gsi_last_bb (new_bb);
	  gsi_prev (&gsi);
	  e = split_block (new_bb, gsi_stmt (gsi));
	}
      cond_bb = e->src;
      new_bb = e->dest;
      remove_edge (e);

      then_bb = create_empty_bb (cond_bb);
      else_bb = create_empty_bb (then_bb);
      set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
      set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);

      stmt = gimple_build_cond_empty (cond);
      gsi = gsi_last_bb (cond_bb);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      gsi = gsi_start_bb (then_bb);
      stmt = gimple_build_assign (tmp_var, device);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      gsi = gsi_start_bb (else_bb);
      stmt = gimple_build_assign (tmp_var,
				  build_int_cst (integer_type_node,
						 GOMP_DEVICE_HOST_FALLBACK));
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
      make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
      add_bb_to_loop (then_bb, cond_bb->loop_father);
      add_bb_to_loop (else_bb, cond_bb->loop_father);
      make_edge (then_bb, new_bb, EDGE_FALLTHRU);
      make_edge (else_bb, new_bb, EDGE_FALLTHRU);

      device = tmp_var;
    }

  gsi = gsi_last_bb (new_bb);
  t = gimple_omp_target_data_arg (entry_stmt);
  if (t == NULL)
    {
      t1 = size_zero_node;
      t2 = build_zero_cst (ptr_type_node);
      t3 = t2;
      t4 = t2;
    }
  else
    {
      t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1))));
      t1 = size_binop (PLUS_EXPR, t1, size_int (1));
      t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0));
      t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1));
      t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2));
    }

  gimple g;
  /* The maximum number used by any start_ix, without varargs.  */
  auto_vec<tree, 11> args;
  args.quick_push (device);
  if (offloaded)
    args.quick_push (build_fold_addr_expr (child_fn));
  switch (start_ix)
    {
    case BUILT_IN_GOMP_TARGET:
    case BUILT_IN_GOMP_TARGET_DATA:
    case BUILT_IN_GOMP_TARGET_UPDATE:
      /* This const void * is part of the current ABI, but we're not actually
	 using it.  */
      args.quick_push (build_zero_cst (ptr_type_node));
      break;
    case BUILT_IN_GOACC_DATA_START:
    case BUILT_IN_GOACC_ENTER_EXIT_DATA:
    case BUILT_IN_GOACC_PARALLEL:
    case BUILT_IN_GOACC_UPDATE:
      break;
    default:
      gcc_unreachable ();
    }
  args.quick_push (t1);
  args.quick_push (t2);
  args.quick_push (t3);
  args.quick_push (t4);
  switch (start_ix)
    {
    case BUILT_IN_GOACC_DATA_START:
    case BUILT_IN_GOMP_TARGET:
    case BUILT_IN_GOMP_TARGET_DATA:
    case BUILT_IN_GOMP_TARGET_UPDATE:
      break;
    case BUILT_IN_GOACC_PARALLEL:
      {
	tree t_num_gangs, t_num_workers, t_vector_length;

	/* Default values for num_gangs, num_workers, and vector_length.  */
	t_num_gangs = t_num_workers = t_vector_length
	  = fold_convert_loc (gimple_location (entry_stmt),
			      integer_type_node, integer_one_node);
	/* ..., but if present, use the value specified by the respective
	   clause, making sure that are of the correct type.  */
	c = find_omp_clause (clauses, OMP_CLAUSE_NUM_GANGS);
	if (c)
	  t_num_gangs = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
					  integer_type_node,
					  OMP_CLAUSE_NUM_GANGS_EXPR (c));
	c = find_omp_clause (clauses, OMP_CLAUSE_NUM_WORKERS);
	if (c)
	  t_num_workers = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
					    integer_type_node,
					    OMP_CLAUSE_NUM_WORKERS_EXPR (c));
	c = find_omp_clause (clauses, OMP_CLAUSE_VECTOR_LENGTH);
	if (c)
	  t_vector_length = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
					      integer_type_node,
					      OMP_CLAUSE_VECTOR_LENGTH_EXPR (c));
	args.quick_push (t_num_gangs);
	args.quick_push (t_num_workers);
	args.quick_push (t_vector_length);
      }
      /* FALLTHRU */
    case BUILT_IN_GOACC_ENTER_EXIT_DATA:
    case BUILT_IN_GOACC_UPDATE:
      {
	tree t_async;
	int t_wait_idx;

	/* Default values for t_async.  */
	t_async = fold_convert_loc (gimple_location (entry_stmt),
				    integer_type_node,
				    build_int_cst (integer_type_node,
						   GOMP_ASYNC_SYNC));
	/* ..., but if present, use the value specified by the respective
	   clause, making sure that is of the correct type.  */
	c = find_omp_clause (clauses, OMP_CLAUSE_ASYNC);
	if (c)
	  t_async = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
				      integer_type_node,
				      OMP_CLAUSE_ASYNC_EXPR (c));

	args.quick_push (t_async);
	/* Save the index, and... */
	t_wait_idx = args.length ();
	/* ... push a default value.  */
	args.quick_push (fold_convert_loc (gimple_location (entry_stmt),
					   integer_type_node,
					   integer_zero_node));
	c = find_omp_clause (clauses, OMP_CLAUSE_WAIT);
	if (c)
	  {
	    int n = 0;

	    for (; c; c = OMP_CLAUSE_CHAIN (c))
	      {
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_WAIT)
		  {
		    args.safe_push (fold_convert_loc (OMP_CLAUSE_LOCATION (c),
						      integer_type_node,
						      OMP_CLAUSE_WAIT_EXPR (c)));
		    n++;
		  }
	      }

	    /* Now that we know the number, replace the default value.  */
	    args.ordered_remove (t_wait_idx);
	    args.quick_insert (t_wait_idx,
			       fold_convert_loc (gimple_location (entry_stmt),
						 integer_type_node,
						 build_int_cst (integer_type_node, n)));
	  }
      }
      break;
    default:
      gcc_unreachable ();
    }

  g = gimple_build_call_vec (builtin_decl_explicit (start_ix), args);
  gimple_set_location (g, gimple_location (entry_stmt));
  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
  if (!offloaded)
    {
      /* For pure data/update directives the GIMPLE_OMP_TARGET statement
	 itself is no longer needed once the library call is in place.  */
      g = gsi_stmt (gsi);
      gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
      gsi_remove (&gsi, true);
    }
  if (data_region && region->exit)
    {
      gsi = gsi_last_bb (region->exit);
      g = gsi_stmt (gsi);
      gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
      gsi_remove (&gsi, true);
    }
}

/* Expand the parallel region tree rooted at REGION.  Expansion
   proceeds in depth-first order.  Innermost regions are expanded
   first.  This way, parallel regions that require a new function to
   be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
   internal dependencies in their body.  */

static void
expand_omp (struct omp_region *region)
{
  while (region)
    {
      location_t saved_location;
      gimple inner_stmt = NULL;

      /* First, determine whether this is a combined parallel+workshare
	 region.  */
      if (region->type == GIMPLE_OMP_PARALLEL)
	determine_parallel_type (region);

      if (region->type == GIMPLE_OMP_FOR
	  && gimple_omp_for_combined_p (last_stmt (region->entry)))
	inner_stmt = last_stmt (region->inner->entry);

      /* Expand children before the region itself (depth-first).  */
      if (region->inner)
	expand_omp (region->inner);

      saved_location = input_location;
      if (gimple_has_location (last_stmt (region->entry)))
	input_location = gimple_location (last_stmt (region->entry));

      switch (region->type)
	{
	case GIMPLE_OMP_PARALLEL:
	case GIMPLE_OMP_TASK:
	  expand_omp_taskreg (region);
	  break;

	case GIMPLE_OMP_FOR:
	  expand_omp_for (region, inner_stmt);
	  break;

	case GIMPLE_OMP_SECTIONS:
	  expand_omp_sections (region);
	  break;

	case GIMPLE_OMP_SECTION:
	  /* Individual omp sections are handled together with their
	     parent GIMPLE_OMP_SECTIONS region.  */
	  break;

	case GIMPLE_OMP_SINGLE:
	  expand_omp_single (region);
	  break;

	case GIMPLE_OMP_MASTER:
	case GIMPLE_OMP_TASKGROUP:
	case GIMPLE_OMP_ORDERED:
	case GIMPLE_OMP_CRITICAL:
	case GIMPLE_OMP_TEAMS:
	  expand_omp_synch (region);
	  break;

	case GIMPLE_OMP_ATOMIC_LOAD:
	  expand_omp_atomic (region);
	  break;

	case GIMPLE_OMP_TARGET:
	  expand_omp_target (region);
	  break;

	default:
	  gcc_unreachable ();
	}

      input_location = saved_location;
      region = region->next;
    }
}

/* Helper for build_omp_regions.  Scan the dominator tree starting at
   block BB.  PARENT is the region that contains BB.  If SINGLE_TREE is
   true, the function ends once a single tree is built (otherwise, whole
   forest of OMP constructs may be built).  */

static void
build_omp_regions_1 (basic_block bb, struct omp_region *parent,
		     bool single_tree)
{
  gimple_stmt_iterator gsi;
  gimple stmt;
  basic_block son;

  gsi = gsi_last_bb (bb);
  if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
    {
      struct omp_region *region;
      enum gimple_code code;

      stmt = gsi_stmt (gsi);
      code = gimple_code (stmt);
      if (code == GIMPLE_OMP_RETURN)
	{
	  /* STMT is the return point out of region PARENT.  Mark it
	     as the exit point and make PARENT the immediately
	     enclosing region.  */
	  gcc_assert (parent);
	  region = parent;
	  region->exit = bb;
	  parent = parent->outer;
	}
      else if (code == GIMPLE_OMP_ATOMIC_STORE)
	{
	  /* GIMPLE_OMP_ATOMIC_STORE is analoguous to
	     GIMPLE_OMP_RETURN, but matches with
	     GIMPLE_OMP_ATOMIC_LOAD.  */
	  gcc_assert (parent);
	  gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
	  region = parent;
	  region->exit = bb;
	  parent = parent->outer;
	}
      else if (code == GIMPLE_OMP_CONTINUE)
	{
	  gcc_assert (parent);
	  parent->cont = bb;
	}
      else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
	{
	  /* GIMPLE_OMP_SECTIONS_SWITCH is part of
	     GIMPLE_OMP_SECTIONS, and we do nothing for it.  */
	}
      else
	{
	  region = new_omp_region (bb, code, parent);
	  /* Otherwise...  */
	  if (code == GIMPLE_OMP_TARGET)
	    {
	      switch (gimple_omp_target_kind (stmt))
		{
		case GF_OMP_TARGET_KIND_REGION:
		case GF_OMP_TARGET_KIND_DATA:
		case GF_OMP_TARGET_KIND_OACC_PARALLEL:
		case GF_OMP_TARGET_KIND_OACC_KERNELS:
		case GF_OMP_TARGET_KIND_OACC_DATA:
		  break;
		case GF_OMP_TARGET_KIND_UPDATE:
		case GF_OMP_TARGET_KIND_OACC_UPDATE:
		case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
		  /* ..., other than for those stand-alone directives...  */
		  region = NULL;
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  /* ..., this directive becomes the parent for a new region.  */
	  if (region)
	    parent = region;
	}
    }

  if (single_tree && !parent)
    return;

  /* Recurse over dominated blocks so nesting follows dominance.  */
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    build_omp_regions_1 (son, parent, single_tree);
}

/* Builds the tree of OMP regions rooted at ROOT, storing it to
   root_omp_region.  */

static void
build_omp_regions_root (basic_block root)
{
  gcc_assert (root_omp_region == NULL);
  build_omp_regions_1 (root, NULL, true);
  gcc_assert (root_omp_region != NULL);
}

/* Expands omp construct (and its subconstructs) starting in HEAD.  */

void
omp_expand_local (basic_block head)
{
  build_omp_regions_root (head);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);
  expand_omp (root_omp_region);

  free_omp_regions ();
}

/* Scan the CFG and build a tree of OMP regions.  Return the root of
   the OMP region tree.  */

static void
build_omp_regions (void)
{
  gcc_assert (root_omp_region == NULL);
  calculate_dominance_info (CDI_DOMINATORS);
  build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false);
}

/* Main entry point for expanding OMP-GIMPLE into runtime calls.
*/

static unsigned int
execute_expand_omp (void)
{
  build_omp_regions ();

  /* Nothing to do if the function contains no OMP constructs.  */
  if (!root_omp_region)
    return 0;

  if (dump_file)
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);

  expand_omp (root_omp_region);

  cleanup_tree_cfg ();

  free_omp_regions ();

  return 0;
}

/* OMP expansion -- the default pass, run before creation of SSA form.  */

namespace {

const pass_data pass_data_expand_omp =
{
  GIMPLE_PASS, /* type */
  "ompexp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_gimple_any, /* properties_required */
  PROP_gimple_eomp, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_expand_omp : public gimple_opt_pass
{
public:
  pass_expand_omp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_expand_omp, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      /* Only do real work when one of the OMP-family front-end flags is
	 active and parsing succeeded.  */
      bool gate = ((flag_cilkplus != 0 || flag_openacc != 0 || flag_openmp != 0
		    || flag_openmp_simd != 0)
		   && !seen_error ());

      /* This pass always runs, to provide PROP_gimple_eomp.
	 But often, there is nothing to do.  */
      if (!gate)
	return 0;

      return execute_expand_omp ();
    }

}; // class pass_expand_omp

} // anon namespace

gimple_opt_pass *
make_pass_expand_omp (gcc::context *ctxt)
{
  return new pass_expand_omp (ctxt);
}

namespace {

const pass_data pass_data_expand_omp_ssa =
{
  GIMPLE_PASS, /* type */
  "ompexpssa", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_cfg | PROP_ssa, /* properties_required */
  PROP_gimple_eomp, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_cleanup_cfg | TODO_rebuild_alias, /* todo_flags_finish */
};

class pass_expand_omp_ssa : public gimple_opt_pass
{
public:
  pass_expand_omp_ssa (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_expand_omp_ssa, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *fun)
    {
      /* Run only if the ompexp pass has not already provided
	 PROP_gimple_eomp for this function.  */
      return !(fun->curr_properties & PROP_gimple_eomp);
    }
  virtual unsigned int execute (function *)
    {
      return execute_expand_omp ();
    }

}; // class pass_expand_omp_ssa

} // anon namespace

gimple_opt_pass *
make_pass_expand_omp_ssa (gcc::context *ctxt)
{
  return new pass_expand_omp_ssa (ctxt);
}

/* Routines to lower OMP directives into OMP-GIMPLE.  */

/* Helper function to preform, potentially COMPLEX_TYPE, operation and
   convert it to gimple.
*/ static void oacc_gimple_assign (tree dest, tree_code op, tree src, gimple_seq *seq) { gimple stmt; if (TREE_CODE (TREE_TYPE (dest)) != COMPLEX_TYPE) { stmt = gimple_build_assign (dest, op, dest, src); gimple_seq_add_stmt (seq, stmt); return; } tree t = create_tmp_var (TREE_TYPE (TREE_TYPE (dest))); tree rdest = fold_build1 (REALPART_EXPR, TREE_TYPE (TREE_TYPE (dest)), dest); gimplify_assign (t, rdest, seq); rdest = t; t = create_tmp_var (TREE_TYPE (TREE_TYPE (dest))); tree idest = fold_build1 (IMAGPART_EXPR, TREE_TYPE (TREE_TYPE (dest)), dest); gimplify_assign (t, idest, seq); idest = t; t = create_tmp_var (TREE_TYPE (TREE_TYPE (src))); tree rsrc = fold_build1 (REALPART_EXPR, TREE_TYPE (TREE_TYPE (src)), src); gimplify_assign (t, rsrc, seq); rsrc = t; t = create_tmp_var (TREE_TYPE (TREE_TYPE (src))); tree isrc = fold_build1 (IMAGPART_EXPR, TREE_TYPE (TREE_TYPE (src)), src); gimplify_assign (t, isrc, seq); isrc = t; tree r = create_tmp_var (TREE_TYPE (TREE_TYPE (dest))); tree i = create_tmp_var (TREE_TYPE (TREE_TYPE (dest))); tree result; if (op == PLUS_EXPR) { stmt = gimple_build_assign (r, op, rdest, rsrc); gimple_seq_add_stmt (seq, stmt); stmt = gimple_build_assign (i, op, idest, isrc); gimple_seq_add_stmt (seq, stmt); } else if (op == MULT_EXPR) { /* Let x = a + ib = dest, y = c + id = src. 
x * y = (ac - bd) + i(ad + bc) */ tree ac = create_tmp_var (TREE_TYPE (TREE_TYPE (dest))); tree bd = create_tmp_var (TREE_TYPE (TREE_TYPE (dest))); tree ad = create_tmp_var (TREE_TYPE (TREE_TYPE (dest))); tree bc = create_tmp_var (TREE_TYPE (TREE_TYPE (dest))); stmt = gimple_build_assign (ac, MULT_EXPR, rdest, rsrc); gimple_seq_add_stmt (seq, stmt); stmt = gimple_build_assign (bd, MULT_EXPR, idest, isrc); gimple_seq_add_stmt (seq, stmt); stmt = gimple_build_assign (r, MINUS_EXPR, ac, bd); gimple_seq_add_stmt (seq, stmt); stmt = gimple_build_assign (ad, MULT_EXPR, rdest, isrc); gimple_seq_add_stmt (seq, stmt); stmt = gimple_build_assign (bd, MULT_EXPR, idest, rsrc); gimple_seq_add_stmt (seq, stmt); stmt = gimple_build_assign (i, PLUS_EXPR, ad, bc); gimple_seq_add_stmt (seq, stmt); } else gcc_unreachable (); result = build2 (COMPLEX_EXPR, TREE_TYPE (dest), r, i); gimplify_assign (dest, result, seq); } /* Helper function to initialize local data for the reduction arrays. The reduction arrays need to be placed inside the calling function for accelerators, or else the host won't be able to preform the final reduction. */ static void oacc_initialize_reduction_data (tree clauses, tree nthreads, gimple_seq *stmt_seqp, omp_context *ctx) { tree c, t, oc; gimple stmt; omp_context *octx; /* Find the innermost OpenACC parallel context. */ if (gimple_code (ctx->stmt) == GIMPLE_OMP_TARGET && (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_OACC_PARALLEL)) octx = ctx; else octx = ctx->outer; gcc_checking_assert (gimple_code (octx->stmt) == GIMPLE_OMP_TARGET && (gimple_omp_target_kind (octx->stmt) == GF_OMP_TARGET_KIND_OACC_PARALLEL)); /* Extract the clauses. */ oc = gimple_omp_target_clauses (octx->stmt); /* Find the last outer clause. */ for (; oc && OMP_CLAUSE_CHAIN (oc); oc = OMP_CLAUSE_CHAIN (oc)) ; /* Allocate arrays for each reduction variable. 
*/
  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
	continue;

      tree var = OMP_CLAUSE_DECL (c);
      tree type = get_base_type (var);
      tree array = lookup_oacc_reduction (oacc_get_reduction_array_id (var),
					  ctx);
      tree size, call;

      /* Calculate size of the reduction array: nthreads * sizeof (type).  */
      t = create_tmp_var (TREE_TYPE (nthreads));
      stmt = gimple_build_assign (t, MULT_EXPR, nthreads,
				  fold_convert (TREE_TYPE (nthreads),
						TYPE_SIZE_UNIT (type)));
      gimple_seq_add_stmt (stmt_seqp, stmt);

      size = create_tmp_var (sizetype);
      gimplify_assign (size, fold_build1 (NOP_EXPR, sizetype, t), stmt_seqp);

      /* Now allocate memory for it (on the host stack, via alloca).  */
      call = unshare_expr (builtin_decl_explicit (BUILT_IN_ALLOCA));
      stmt = gimple_build_call (call, 1, size);
      gimple_call_set_lhs (stmt, array);
      gimple_seq_add_stmt (stmt_seqp, stmt);

      /* Map this array into the accelerator.  */

      /* Add the reduction array to the list of clauses, appending a
	 GOMP_MAP_FORCE_FROM map so the partial results are copied back.  */
      tree x = array;
      t = build_omp_clause (gimple_location (ctx->stmt), OMP_CLAUSE_MAP);
      OMP_CLAUSE_SET_MAP_KIND (t, GOMP_MAP_FORCE_FROM);
      OMP_CLAUSE_DECL (t) = x;
      OMP_CLAUSE_CHAIN (t) = NULL;
      if (oc)
	OMP_CLAUSE_CHAIN (oc) = t;
      else
	gimple_omp_target_set_clauses (as_a <gomp_target *> (octx->stmt), t);
      OMP_CLAUSE_SIZE (t) = size;
      oc = t;
    }
}

/* Helper function to process the array of partial reductions.  Nthreads
   indicates the number of threads.  Unfortunately, GOACC_GET_NUM_THREADS
   cannot be used here, because nthreads on the host may be different than
   on the accelerator. */

static void
oacc_finalize_reduction_data (tree clauses, tree nthreads,
			      gimple_seq *stmt_seqp, omp_context *ctx)
{
  tree c, x, var, array, loop_header, loop_body, loop_exit, type;
  gimple stmt;

  /* Create for loop.
     let var = the original reduction variable
     let array = reduction variable array

     for (i = 0; i < nthreads; i++)
       var op= array[i]
 */

  loop_header = create_artificial_label (UNKNOWN_LOCATION);
  loop_body = create_artificial_label (UNKNOWN_LOCATION);
  loop_exit = create_artificial_label (UNKNOWN_LOCATION);

  /* Create and initialize an index variable.  */
  tree ix = create_tmp_var (sizetype);
  gimplify_assign (ix, fold_build1 (NOP_EXPR, sizetype, integer_zero_node),
		   stmt_seqp);

  /* Insert the loop header label here.  */
  gimple_seq_add_stmt (stmt_seqp, gimple_build_label (loop_header));

  /* Exit loop if ix >= nthreads.  */
  x = create_tmp_var (sizetype);
  gimplify_assign (x, fold_build1 (NOP_EXPR, sizetype, nthreads), stmt_seqp);
  stmt = gimple_build_cond (GE_EXPR, ix, x, loop_exit, loop_body);
  gimple_seq_add_stmt (stmt_seqp, stmt);

  /* Insert the loop body label here.  */
  gimple_seq_add_stmt (stmt_seqp, gimple_build_label (loop_body));

  /* Collapse each reduction array, one element at a time.  */
  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
	continue;

      tree_code reduction_code = OMP_CLAUSE_REDUCTION_CODE (c);

      /* reduction(-:var) sums up the partial results, so it acts
	 identically to reduction(+:var).  */
      if (reduction_code == MINUS_EXPR)
	reduction_code = PLUS_EXPR;

      /* Set up reduction variable var.  */
      var = OMP_CLAUSE_DECL (c);
      type = get_base_type (var);
      array = lookup_oacc_reduction (oacc_get_reduction_array_id
				     (OMP_CLAUSE_DECL (c)), ctx);

      /* Calculate the array offset: sizeof (type) * ix.  */
      tree offset = create_tmp_var (sizetype);
      gimplify_assign (offset, TYPE_SIZE_UNIT (type), stmt_seqp);
      stmt = gimple_build_assign (offset, MULT_EXPR, offset, ix);
      gimple_seq_add_stmt (stmt_seqp, stmt);

      tree ptr = create_tmp_var (TREE_TYPE (array));
      stmt = gimple_build_assign (ptr, POINTER_PLUS_EXPR, array, offset);
      gimple_seq_add_stmt (stmt_seqp, stmt);

      /* Extract array[ix] into mem.
*/
      tree mem = create_tmp_var (type);
      gimplify_assign (mem, build_simple_mem_ref (ptr), stmt_seqp);

      /* Find the original reduction variable.  */
      if (is_reference (var))
	var = build_simple_mem_ref (var);

      tree t = create_tmp_var (type);

      /* t = var, so the accumulation below happens on a temporary.  */
      x = lang_hooks.decls.omp_clause_assign_op (c, t, var);
      gimplify_and_add (unshare_expr(x), stmt_seqp);

      /* var = var op mem */
      switch (OMP_CLAUSE_REDUCTION_CODE (c))
	{
	case TRUTH_ANDIF_EXPR:
	case TRUTH_ORIF_EXPR:
	  t = fold_build2 (OMP_CLAUSE_REDUCTION_CODE (c), integer_type_node,
			   t, mem);
	  gimplify_and_add (t, stmt_seqp);
	  break;
	default:
	  /* The lhs isn't a gimple_reg when var is COMPLEX_TYPE.  */
	  oacc_gimple_assign (t, OMP_CLAUSE_REDUCTION_CODE (c), mem,
			      stmt_seqp);
	}

      /* Store the accumulated value back into the reduction variable.  */
      t = fold_build1 (NOP_EXPR, TREE_TYPE (var), t);
      x = lang_hooks.decls.omp_clause_assign_op (c, var, t);
      gimplify_and_add (unshare_expr(x), stmt_seqp);
    }

  /* Increment the induction variable.  */
  tree one = fold_build1 (NOP_EXPR, sizetype, integer_one_node);
  stmt = gimple_build_assign (ix, PLUS_EXPR, ix, one);
  gimple_seq_add_stmt (stmt_seqp, stmt);

  /* Go back to the top of the loop.  */
  gimple_seq_add_stmt (stmt_seqp, gimple_build_goto (loop_header));

  /* Place the loop exit label here.  */
  gimple_seq_add_stmt (stmt_seqp, gimple_build_label (loop_exit));
}

/* Scan through all of the gimple stmts searching for an OMP_FOR_EXPR, and
   scan that for reductions.  */

static void
oacc_process_reduction_data (gimple_seq *body, gimple_seq *in_stmt_seqp,
			     gimple_seq *out_stmt_seqp, omp_context *ctx)
{
  gimple_stmt_iterator gsi;
  gimple_seq inner = NULL;

  /* A collapse clause may have inserted a new bind block.
*/
  /* Descend through any bind blocks until we reach the GIMPLE_OMP_FOR.  */
  gsi = gsi_start (*body);
  while (!gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (gbind *bind_stmt = dyn_cast <gbind *> (stmt))
	{
	  inner = gimple_bind_body (bind_stmt);
	  body = &inner;
	  gsi = gsi_start (*body);
	}
      else if (dyn_cast <gomp_for *> (stmt))
	break;
      else
	gsi_next (&gsi);
    }

  for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree clauses, nthreads, t, c, acc_device, acc_device_host, call,
	enter, exit;
      bool reduction_found = false;

      gimple stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
	{
	case GIMPLE_OMP_FOR:
	  clauses = gimple_omp_for_clauses (stmt);

	  /* Search for a reduction clause.  */
	  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
	    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
	      {
		reduction_found = true;
		break;
	      }

	  if (!reduction_found)
	    break;

	  ctx = maybe_lookup_ctx (stmt);
	  t = NULL_TREE;

	  /* Extract the number of threads.  */
	  nthreads = create_tmp_var (sizetype);
	  t = oacc_max_threads (ctx);
	  gimplify_assign (nthreads, t, in_stmt_seqp);

	  /* Determine if this kernel will be executed on the host.  */
	  call = builtin_decl_explicit (BUILT_IN_ACC_GET_DEVICE_TYPE);
	  acc_device = create_tmp_var (integer_type_node, ".acc_device_type");
	  stmt = gimple_build_call (call, 0);
	  gimple_call_set_lhs (stmt, acc_device);
	  gimple_seq_add_stmt (in_stmt_seqp, stmt);

	  /* Set nthreads = 1 for ACC_DEVICE_TYPE=host.
*/
	  acc_device_host = create_tmp_var (integer_type_node,
					    ".acc_device_host");
	  gimplify_assign (acc_device_host,
			   build_int_cst (integer_type_node,
					  GOMP_DEVICE_HOST),
			   in_stmt_seqp);

	  /* if (acc_device == GOMP_DEVICE_HOST) nthreads = 1;  */
	  enter = create_artificial_label (UNKNOWN_LOCATION);
	  exit = create_artificial_label (UNKNOWN_LOCATION);

	  stmt = gimple_build_cond (EQ_EXPR, acc_device, acc_device_host,
				    enter, exit);
	  gimple_seq_add_stmt (in_stmt_seqp, stmt);
	  gimple_seq_add_stmt (in_stmt_seqp, gimple_build_label (enter));
	  gimplify_assign (nthreads, fold_build1 (NOP_EXPR, sizetype,
						  integer_one_node),
			   in_stmt_seqp);
	  gimple_seq_add_stmt (in_stmt_seqp, gimple_build_label (exit));

	  /* Also, set nthreads = 1 for ACC_DEVICE_TYPE=host_nonshm.  */
	  gimplify_assign (acc_device_host,
			   build_int_cst (integer_type_node,
					  GOMP_DEVICE_HOST_NONSHM),
			   in_stmt_seqp);

	  enter = create_artificial_label (UNKNOWN_LOCATION);
	  exit = create_artificial_label (UNKNOWN_LOCATION);

	  stmt = gimple_build_cond (EQ_EXPR, acc_device, acc_device_host,
				    enter, exit);
	  gimple_seq_add_stmt (in_stmt_seqp, stmt);
	  gimple_seq_add_stmt (in_stmt_seqp, gimple_build_label (enter));
	  gimplify_assign (nthreads, fold_build1 (NOP_EXPR, sizetype,
						  integer_one_node),
			   in_stmt_seqp);
	  gimple_seq_add_stmt (in_stmt_seqp, gimple_build_label (exit));

	  /* Emit the reduction-array setup before the region and the
	     final host-side reduction loop after it.  */
	  oacc_initialize_reduction_data (clauses, nthreads, in_stmt_seqp,
					  ctx);
	  oacc_finalize_reduction_data (clauses, nthreads, out_stmt_seqp, ctx);
	  break;
	default:
	  // Scan for other directives which support reduction here.
	  break;
	}
    }
}

/* If ctx is a worksharing context inside of a cancellable parallel
   region and it isn't nowait, add lhs to its GIMPLE_OMP_RETURN
   and conditional branch to parallel's cancel_label to handle
   cancellation in the implicit barrier.
*/
static void
maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple_seq *body)
{
  gimple omp_return = gimple_seq_last_stmt (*body);
  gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
  if (gimple_omp_return_nowait_p (omp_return))
    return;
  if (ctx->outer
      && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_PARALLEL
      && ctx->outer->cancellable)
    {
      /* The GOMP_CANCEL builtin's boolean return type is used for the
	 lhs that the implicit barrier's cancellation check tests.  */
      tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
      tree c_bool_type = TREE_TYPE (TREE_TYPE (fndecl));
      tree lhs = create_tmp_var (c_bool_type);
      gimple_omp_return_set_lhs (omp_return, lhs);
      tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
      gimple g = gimple_build_cond (NE_EXPR, lhs,
				    fold_convert (c_bool_type,
						  boolean_false_node),
				    ctx->outer->cancel_label, fallthru_label);
      gimple_seq_add_stmt (body, g);
      gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
    }
}

/* Lower the OpenMP sections directive in the current statement in GSI_P.
   CTX is the enclosing OMP context for the current statement.  */

static void
lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, control;
  gimple_stmt_iterator tgsi;
  gomp_sections *stmt;
  gimple t;
  gbind *new_stmt, *bind;
  gimple_seq ilist, dlist, olist, new_body;

  stmt = as_a <gomp_sections *> (gsi_stmt (*gsi_p));

  push_gimplify_context ();

  dlist = NULL;
  ilist = NULL;
  lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
			   &ilist, &dlist, ctx, NULL);

  /* Lower each GIMPLE_OMP_SECTION inside the body in turn.  */
  new_body = gimple_omp_body (stmt);
  gimple_omp_set_body (stmt, NULL);
  tgsi = gsi_start (new_body);
  for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
    {
      omp_context *sctx;
      gimple sec_start;

      sec_start = gsi_stmt (tgsi);
      sctx = maybe_lookup_ctx (sec_start);
      gcc_assert (sctx);

      lower_omp (gimple_omp_body_ptr (sec_start), sctx);
      gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
			    GSI_CONTINUE_LINKING);
      gimple_omp_set_body (sec_start, NULL);

      if (gsi_one_before_end_p (tgsi))
	{
	  /* Lastprivate clauses are emitted in the last section only.  */
	  gimple_seq l = NULL;
	  lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
				     &l, ctx);
	  gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
	  gimple_omp_section_set_last (sec_start);
	}

      gsi_insert_after (&tgsi, gimple_build_omp_return (false),
			GSI_CONTINUE_LINKING);
    }

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, new_body, block);

  olist = NULL;
  lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, new_stmt, true);

  pop_gimplify_context (new_stmt);
  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  /* Assemble the lowered region: input clauses, the sections statement,
     the switch, the bind with the section bodies, then exit clauses.  */
  new_body = NULL;
  gimple_seq_add_seq (&new_body, ilist);
  gimple_seq_add_stmt (&new_body, stmt);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
  gimple_seq_add_stmt (&new_body, bind);

  control = create_tmp_var (unsigned_type_node, ".section");
  t = gimple_build_omp_continue (control, control);
  gimple_omp_sections_set_control (stmt, control);
  gimple_seq_add_stmt (&new_body, t);

  gimple_seq_add_seq (&new_body, olist);
  if (ctx->cancellable)
    gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
  gimple_seq_add_seq (&new_body, dlist);

  new_body = maybe_catch_exception (new_body);

  t = gimple_build_omp_return
	(!!find_omp_clause (gimple_omp_sections_clauses (stmt),
			    OMP_CLAUSE_NOWAIT));
  gimple_seq_add_stmt (&new_body, t);
  maybe_add_implicit_barrier_cancel (ctx, &new_body);

  gimple_bind_set_body (new_stmt, new_body);
}

/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, without a copyprivate clause:

	if (GOMP_single_start ())
	  BODY;
	[ GOMP_barrier (); ]	-> unless 'nowait' is present.

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.
*/
static void
lower_omp_single_simple (gomp_single *single_stmt, gimple_seq *pre_p)
{
  location_t loc = gimple_location (single_stmt);
  tree tlabel = create_artificial_label (loc);
  tree flabel = create_artificial_label (loc);
  gimple call, cond;
  tree lhs, decl;

  /* lhs = GOMP_single_start ();  */
  decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
  lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)));
  call = gimple_build_call (decl, 0);
  gimple_call_set_lhs (call, lhs);
  gimple_seq_add_stmt (pre_p, call);

  /* if (lhs) goto tlabel; else goto flabel;  */
  cond = gimple_build_cond (EQ_EXPR, lhs,
			    fold_convert_loc (loc, TREE_TYPE (lhs),
					      boolean_true_node),
			    tlabel, flabel);
  gimple_seq_add_stmt (pre_p, cond);
  gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
  gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
}

/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, with a copyprivate clause:

	#pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

	{
	  if ((copyout_p = GOMP_single_copy_start ()) == NULL)
	    {
	      BODY;
	      copyout.a = a;
	      copyout.b = b;
	      copyout.c = c;
	      GOMP_single_copy_end (&copyout);
	    }
	  else
	    {
	      a = copyout_p->a;
	      b = copyout_p->b;
	      c = copyout_p->c;
	    }
	  GOMP_barrier ();
	}

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.
*/
static void
lower_omp_single_copy (gomp_single *single_stmt, gimple_seq *pre_p,
		       omp_context *ctx)
{
  tree ptr_type, t, l0, l1, l2, bfn_decl;
  gimple_seq copyin_seq;
  location_t loc = gimple_location (single_stmt);

  ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");

  ptr_type = build_pointer_type (ctx->record_type);
  ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");

  l0 = create_artificial_label (loc);
  l1 = create_artificial_label (loc);
  l2 = create_artificial_label (loc);

  /* receiver = (struct *) GOMP_single_copy_start ();  */
  bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
  t = build_call_expr_loc (loc, bfn_decl, 0);
  t = fold_convert_loc (loc, ptr_type, t);
  gimplify_assign (ctx->receiver_decl, t, pre_p);

  /* NULL means this thread executes the body (label l0); non-NULL means
     it copies the values out of the broadcast struct (label l1).  */
  t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
	      build_int_cst (ptr_type, 0));
  t = build3 (COND_EXPR, void_type_node, t,
	      build_and_jump (&l0), build_and_jump (&l1));
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l0));

  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));

  copyin_seq = NULL;
  lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
			      &copyin_seq, ctx);

  t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
  bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
  t = build_call_expr_loc (loc, bfn_decl, 1, t);
  gimplify_and_add (t, pre_p);

  t = build_and_jump (&l2);
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l1));

  gimple_seq_add_seq (pre_p, copyin_seq);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
}

/* Expand code for an OpenMP single directive.
*/
static void
lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple t;
  gomp_single *single_stmt = as_a <gomp_single *> (gsi_stmt (*gsi_p));
  gbind *bind;
  gimple_seq bind_body, bind_body_tail = NULL, dlist;

  push_gimplify_context ();

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  bind_body = NULL;
  dlist = NULL;
  lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
			   &bind_body, &dlist, ctx, NULL);
  lower_omp (gimple_omp_body_ptr (single_stmt), ctx);

  gimple_seq_add_stmt (&bind_body, single_stmt);

  /* A non-NULL record_type means a copyprivate clause is present.  */
  if (ctx->record_type)
    lower_omp_single_copy (single_stmt, &bind_body, ctx);
  else
    lower_omp_single_simple (single_stmt, &bind_body);

  gimple_omp_set_body (single_stmt, NULL);

  gimple_seq_add_seq (&bind_body, dlist);

  bind_body = maybe_catch_exception (bind_body);

  t = gimple_build_omp_return
	(!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
			    OMP_CLAUSE_NOWAIT));
  gimple_seq_add_stmt (&bind_body_tail, t);
  maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail);
  if (ctx->record_type)
    {
      /* Clobber the copyout struct after the region so its stack slot
	 can be reused.  */
      gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
      tree clobber = build_constructor (ctx->record_type, NULL);
      TREE_THIS_VOLATILE (clobber) = 1;
      gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
						   clobber), GSI_SAME_STMT);
    }
  gimple_seq_add_seq (&bind_body, bind_body_tail);
  gimple_bind_set_body (bind, bind_body);

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;
}

/* Expand code for an OpenMP master directive.
*/
static void
lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, lab = NULL, x, bfn_decl;
  gimple stmt = gsi_stmt (*gsi_p);
  gbind *bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tseq;

  push_gimplify_context ();

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_stmt (bind, stmt);

  /* if (omp_get_thread_num () != 0) skip the body.  */
  bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
  x = build_call_expr_loc (loc, bfn_decl, 0);
  x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
  x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
  tseq = NULL;
  gimplify_and_add (x, &tseq);
  gimple_bind_add_seq (bind, tseq);

  lower_omp (gimple_omp_body_ptr (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  gimple_bind_add_stmt (bind, gimple_build_label (lab));

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
}

/* Expand code for an OpenMP taskgroup directive.  */

static void
lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  gcall *x;
  gbind *bind;
  tree block = make_node (BLOCK);

  bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_stmt (bind, stmt);

  /* GOMP_taskgroup_start () before the body; the matching end call is
     emitted by the runtime region handling (via the OMP return).  */
  x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
			 0);
  gimple_bind_add_stmt (bind, x);

  lower_omp (gimple_omp_body_ptr (stmt), ctx);
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
}

/* Expand code for an OpenMP ordered directive.
*/
static void
lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple stmt = gsi_stmt (*gsi_p);
  gcall *x;
  gbind *bind;

  push_gimplify_context ();

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_stmt (bind, stmt);

  /* Bracket the body with GOMP_ordered_start ()/GOMP_ordered_end ().  */
  x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
			 0);
  gimple_bind_add_stmt (bind, x);

  lower_omp (gimple_omp_body_ptr (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
  gimple_bind_add_stmt (bind, x);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
}

/* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But in the NAMED case,
   requires that languages coordinate a symbol name.  It is therefore
   best put here in common code.
*/
/* Cache mapping each critical-section name to its global mutex decl,
   shared by all translation units that use the same name.  */
static GTY(()) hash_map<tree, tree> *critical_name_mutexes;

static void
lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  tree name, lock, unlock;
  gomp_critical *stmt = as_a <gomp_critical *> (gsi_stmt (*gsi_p));
  gbind *bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tbody;

  name = gimple_omp_critical_name (stmt);
  if (name)
    {
      tree decl;

      if (!critical_name_mutexes)
	critical_name_mutexes = hash_map<tree, tree>::create_ggc (10);

      tree *n = critical_name_mutexes->get (name);
      if (n == NULL)
	{
	  char *new_str;

	  decl = create_tmp_var_raw (ptr_type_node);

	  /* The mutex symbol is public and common so every TU using the
	     same critical name shares one lock.  */
	  new_str = ACONCAT ((".gomp_critical_user_",
			      IDENTIFIER_POINTER (name), NULL));
	  DECL_NAME (decl) = get_identifier (new_str);
	  TREE_PUBLIC (decl) = 1;
	  TREE_STATIC (decl) = 1;
	  DECL_COMMON (decl) = 1;
	  DECL_ARTIFICIAL (decl) = 1;
	  DECL_IGNORED_P (decl) = 1;

	  varpool_node::finalize_decl (decl);

	  critical_name_mutexes->put (name, decl);
	}
      else
	decl = *n;

      /* If '#pragma omp critical' is inside offloaded region or
	 inside function marked as offloadable, the symbol must be
	 marked as offloadable too.
*/
      omp_context *octx;
      if (cgraph_node::get (current_function_decl)->offloadable)
	varpool_node::get_create (decl)->offloadable = 1;
      else
	for (octx = ctx->outer; octx; octx = octx->outer)
	  if (is_gimple_omp_offloaded (octx->stmt))
	    {
	      varpool_node::get_create (decl)->offloadable = 1;
	      break;
	    }

      lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
      lock = build_call_expr_loc (loc, lock, 1,
				  build_fold_addr_expr_loc (loc, decl));

      unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
      unlock = build_call_expr_loc (loc, unlock, 1,
				    build_fold_addr_expr_loc (loc, decl));
    }
  else
    {
      /* Unnamed critical sections share the runtime's default lock.  */
      lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
      lock = build_call_expr_loc (loc, lock, 0);

      unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
      unlock = build_call_expr_loc (loc, unlock, 0);
    }

  push_gimplify_context ();

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_stmt (bind, stmt);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (lock, &tbody);
  gimple_bind_set_body (bind, tbody);

  lower_omp (gimple_omp_body_ptr (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (unlock, &tbody);
  gimple_bind_set_body (bind, tbody);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);
  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
}

/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST, iterator initialization is appended to
   *BODY_P.
*/
static void
lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
			   gimple_seq *dlist, struct omp_context *ctx)
{
  tree clauses, cond, vinit;
  enum tree_code cond_code;
  gimple_seq stmts;

  /* Invert the loop condition: the clause fires when the loop is done.  */
  cond_code = fd->loop.cond_code;
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;

  /* When possible, use a strict equality expression.  This can let VRP
     type optimizations deduce the value and remove a copy.  */
  if (tree_fits_shwi_p (fd->loop.step))
    {
      HOST_WIDE_INT step = tree_to_shwi (fd->loop.step);
      if (step == 1 || step == -1)
	cond_code = EQ_EXPR;
    }

  cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);

  clauses = gimple_omp_for_clauses (fd->for_stmt);
  stmts = NULL;
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
  if (!gimple_seq_empty_p (stmts))
    {
      gimple_seq_add_seq (&stmts, *dlist);
      *dlist = stmts;

      /* Optimize: v = 0; is usually cheaper than v = some_other_constant.  */
      vinit = fd->loop.n1;
      if (cond_code == EQ_EXPR
	  && tree_fits_shwi_p (fd->loop.n2)
	  && ! integer_zerop (fd->loop.n2))
	vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      else
	vinit = unshare_expr (vinit);

      /* Initialize the iterator variable, so that threads that don't execute
	 any iterations don't execute the lastprivate clauses by accident.  */
      gimplify_assign (fd->loop.v, vinit, body_p);
    }
}

/* Lower code for an OMP loop directive.  */

static void
lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree *rhs_p, block;
  struct omp_for_data fd, *fdp = NULL;
  gomp_for *stmt = as_a <gomp_for *> (gsi_stmt (*gsi_p));
  gbind *new_stmt;
  gimple_seq omp_for_body, body, dlist;
  size_t i;

  push_gimplify_context ();

  lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);
  /* Replace at gsi right away, so that 'stmt' is no member
     of a sequence anymore as we're going to add it to a different
     one below.
  gsi_replace (gsi_p, new_stmt, true);

  /* Move declaration of temporaries in the loop body before we make
     it go away.  */
  omp_for_body = gimple_omp_body (stmt);
  if (!gimple_seq_empty_p (omp_for_body)
      && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
    {
      gbind *inner_bind
	= as_a <gbind *> (gimple_seq_first_stmt (omp_for_body));
      tree vars = gimple_bind_vars (inner_bind);
      gimple_bind_append_vars (new_stmt, vars);
      /* bind_vars/BLOCK_VARS are being moved to new_stmt/block, don't
	 keep them on the inner_bind and its block.  */
      gimple_bind_set_vars (inner_bind, NULL_TREE);
      if (gimple_bind_block (inner_bind))
	BLOCK_VARS (gimple_bind_block (inner_bind)) = NULL_TREE;
    }

  if (gimple_omp_for_combined_into_p (stmt))
    {
      extract_omp_for_data (stmt, &fd, NULL);
      fdp = &fd;

      /* We need two temporaries with fd.loop.v type (istart/iend)
	 and then (fd.collapse - 1) temporaries with the same
	 type for count2 ... countN-1 vars if not constant.  */
      size_t count = 2;
      tree type = fd.iter_type;
      if (fd.collapse > 1
	  && TREE_CODE (fd.loop.n2) != INTEGER_CST)
	count += fd.collapse - 1;
      bool parallel_for = gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR;
      tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
      tree clauses = *pc;
      /* For a combined parallel-for, reuse the _looptemp_ clauses the
	 enclosing parallel already created; otherwise make new temps.  */
      if (parallel_for)
	outerc
	  = find_omp_clause (gimple_omp_parallel_clauses (ctx->outer->stmt),
			     OMP_CLAUSE__LOOPTEMP_);
      for (i = 0; i < count; i++)
	{
	  tree temp;
	  if (parallel_for)
	    {
	      gcc_assert (outerc);
	      temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
	      outerc = find_omp_clause (OMP_CLAUSE_CHAIN (outerc),
					OMP_CLAUSE__LOOPTEMP_);
	    }
	  else
	    {
	      temp = create_tmp_var (type);
	      insert_decl_map (&ctx->outer->cb, temp, temp);
	    }
	  *pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
	  OMP_CLAUSE_DECL (*pc) = temp;
	  pc = &OMP_CLAUSE_CHAIN (*pc);
	}
      *pc = clauses;
    }

  /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR.
*/
  dlist = NULL;
  body = NULL;
  lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
			   fdp);
  gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));

  lower_omp (gimple_omp_body_ptr (stmt), ctx);

  /* Lower the header expressions.  At this point, we can assume that
     the header is of the form:

	#pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)

     We just need to make sure that VAL1, VAL2 and VAL3 are lowered
     using the .omp_data_s mapping, if needed.  */
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      rhs_p = gimple_omp_for_initial_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = gimple_omp_for_final_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);
    }

  /* Once lowered, extract the bounds and clauses.  */
  extract_omp_for_data (stmt, &fd, NULL);

  lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);

  gimple_seq_add_stmt (&body, stmt);
  gimple_seq_add_seq (&body, gimple_omp_body (stmt));

  gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
							 fd.loop.v));

  /* After the loop, add exit clauses.  */
  lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);

  if (ctx->cancellable)
    gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));

  gimple_seq_add_seq (&body, dlist);

  body = maybe_catch_exception (body);

  /* Region exit marker goes at the end of the loop body.
*/
  gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
  maybe_add_implicit_barrier_cancel (ctx, &body);
  pop_gimplify_context (new_stmt);

  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  gimple_bind_set_body (new_stmt, body);
  gimple_omp_set_body (stmt, NULL);
  gimple_omp_for_set_pre_body (stmt, NULL);
}

/* Callback for walk_stmts.  Check if the current statement only contains
   GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS.  */

static tree
check_combined_parallel (gimple_stmt_iterator *gsi_p,
			 bool *handled_ops_p,
			 struct walk_stmt_info *wi)
{
  /* *info counts the workshare statements seen: 0 = none yet, 1 = exactly
     one, -1 = not a combined-parallel candidate.  */
  int *info = (int *) wi->info;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
      *info = *info == 0 ? 1 : -1;
      break;
    default:
      *info = -1;
      break;
    }
  return NULL;
}

struct omp_taskcopy_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.
*/
  copy_body_data cb;
  omp_context *ctx;
};

static tree
task_copyfn_copy_decl (tree var, copy_body_data *cb)
{
  struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;

  /* Variables living in the sender record get fresh temporaries in the
     copy function; everything else is used as-is.  */
  if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
    return create_tmp_var (TREE_TYPE (var));

  return var;
}

static tree
task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
{
  tree name, new_fields = NULL, type, f;

  /* Build a new RECORD_TYPE mirroring ORIG_TYPE with all field types,
     sizes and offsets remapped through tcctx->cb.  */
  type = lang_hooks.types.make_type (RECORD_TYPE);
  name = DECL_NAME (TYPE_NAME (orig_type));
  name = build_decl (gimple_location (tcctx->ctx->stmt),
		     TYPE_DECL, name, type);
  TYPE_NAME (type) = name;

  for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
    {
      tree new_f = copy_node (f);
      DECL_CONTEXT (new_f) = type;
      TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
      TREE_CHAIN (new_f) = new_fields;
      walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		 &tcctx->cb, NULL);
      new_fields = new_f;
      tcctx->cb.decl_map->put (f, new_f);
    }
  TYPE_FIELDS (type) = nreverse (new_fields);
  layout_type (type);
  return type;
}

/* Create task copyfn.  */

static void
create_task_copyfn (gomp_task *task_stmt, omp_context *ctx)
{
  struct function *child_cfun;
  tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
  tree record_type, srecord_type, bind, list;
  bool record_needs_remap = false, srecord_needs_remap = false;
  splay_tree_node n;
  struct omp_taskcopy_context tcctx;
  location_t loc = gimple_location (task_stmt);

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  gcc_assert (child_cfun->cfg == NULL);
  DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();

  /* Reset DECL_CONTEXT on function arguments.  */
  for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = child_fn;

  /* Populate the function.
*/
  push_gimplify_context ();
  push_cfun (child_cfun);

  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  list = NULL;
  DECL_SAVED_TREE (child_fn) = bind;
  DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);

  /* Remap src and dst argument types if needed.  */
  record_type = ctx->record_type;
  srecord_type = ctx->srecord_type;
  for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	record_needs_remap = true;
	break;
      }
  for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	srecord_needs_remap = true;
	break;
      }

  if (record_needs_remap || srecord_needs_remap)
    {
      /* Set up a copy_body_data so VLA-typed fields can be remapped
	 into the child function's context.  */
      memset (&tcctx, '\0', sizeof (tcctx));
      tcctx.cb.src_fn = ctx->cb.src_fn;
      tcctx.cb.dst_fn = child_fn;
      tcctx.cb.src_node = cgraph_node::get (tcctx.cb.src_fn);
      gcc_checking_assert (tcctx.cb.src_node);
      tcctx.cb.dst_node = tcctx.cb.src_node;
      tcctx.cb.src_cfun = ctx->cb.src_cfun;
      tcctx.cb.copy_decl = task_copyfn_copy_decl;
      tcctx.cb.eh_lp_nr = 0;
      tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
      tcctx.cb.decl_map = new hash_map<tree, tree>;
      tcctx.ctx = ctx;

      if (record_needs_remap)
	record_type = task_copyfn_remap_type (&tcctx, record_type);
      if (srecord_needs_remap)
	srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
    }
  else
    tcctx.cb.decl_map = NULL;

  arg = DECL_ARGUMENTS (child_fn);
  TREE_TYPE (arg) = build_pointer_type (record_type);
  sarg = DECL_CHAIN (arg);
  TREE_TYPE (sarg) = build_pointer_type (srecord_type);

  /* First pass: initialize temporaries used in record_type and srecord_type
     sizes and field offsets.
*/
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree *p;

	  decl = OMP_CLAUSE_DECL (c);
	  p = tcctx.cb.decl_map->get (decl);
	  if (p == NULL)
	    continue;
	  n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	  sf = (tree) n->value;
	  sf = *tcctx.cb.decl_map->get (sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = omp_build_component_ref (src, sf);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
	  append_to_statement_list (t, &list);
	}

  /* Second pass: copy shared var pointers and copy construct non-VLA
     firstprivate vars.  */
  for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
      case OMP_CLAUSE_SHARED:
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *tcctx.cb.decl_map->get (f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	sf = (tree) n->value;
	if (tcctx.cb.decl_map)
	  sf = *tcctx.cb.decl_map->get (sf);
	src = build_simple_mem_ref_loc (loc, sarg);
	src = omp_build_component_ref (src, sf);
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = omp_build_component_ref (dst, f);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_FIRSTPRIVATE:
	decl = OMP_CLAUSE_DECL (c);
	if (is_variable_sized (decl))
	  break;
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *tcctx.cb.decl_map->get (f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *tcctx.cb.decl_map->get (sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = omp_build_component_ref (src, sf);
	    if (use_pointer_for_field (decl, NULL) || is_reference (decl))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = omp_build_component_ref (dst, f);
	t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_PRIVATE:
	if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	  break;
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *tcctx.cb.decl_map->get (f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *tcctx.cb.decl_map->get (sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = omp_build_component_ref (src, sf);
	    if (use_pointer_for_field (decl, NULL))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = omp_build_component_ref (dst, f);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      default:
	break;
      }

  /* Last pass: handle VLA firstprivates.
*/
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree ind, ptr, df;

	  decl = OMP_CLAUSE_DECL (c);
	  if (!is_variable_sized (decl))
	    continue;
	  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	  if (n == NULL)
	    continue;
	  f = (tree) n->value;
	  f = *tcctx.cb.decl_map->get (f);
	  gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
	  ind = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
	  gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
	  n = splay_tree_lookup (ctx->sfield_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  sf = (tree) n->value;
	  sf = *tcctx.cb.decl_map->get (sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = omp_build_component_ref (src, sf);
	  src = build_simple_mem_ref_loc (loc, src);
	  dst = build_simple_mem_ref_loc (loc, arg);
	  dst = omp_build_component_ref (dst, f);
	  t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	  append_to_statement_list (t, &list);
	  /* Point the destination record's pointer field at the copy.  */
	  n = splay_tree_lookup (ctx->field_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  df = (tree) n->value;
	  df = *tcctx.cb.decl_map->get (df);
	  ptr = build_simple_mem_ref_loc (loc, arg);
	  ptr = omp_build_component_ref (ptr, df);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
		      build_fold_addr_expr_loc (loc, dst));
	  append_to_statement_list (t, &list);
	}

  t = build1 (RETURN_EXPR, void_type_node, NULL);
  append_to_statement_list (t, &list);

  if (tcctx.cb.decl_map)
    delete tcctx.cb.decl_map;
  pop_gimplify_context (NULL);
  BIND_EXPR_BODY (bind) = list;
  pop_cfun ();
}

static void
lower_depend_clauses (gimple stmt, gimple_seq *iseq, gimple_seq *oseq)
{
  tree c, clauses;
  gimple g;
  size_t n_in = 0, n_out = 0, idx = 2, i;

  clauses = find_omp_clause (gimple_omp_task_clauses (stmt),
			     OMP_CLAUSE_DEPEND);
  gcc_assert (clauses);
  /* Count the in and out/inout dependences separately.  */
  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
      switch (OMP_CLAUSE_DEPEND_KIND (c))
	{
	case OMP_CLAUSE_DEPEND_IN:
	  n_in++;
	  break;
	case OMP_CLAUSE_DEPEND_OUT:
case OMP_CLAUSE_DEPEND_INOUT: n_out++; break; default: gcc_unreachable (); } tree type = build_array_type_nelts (ptr_type_node, n_in + n_out + 2); tree array = create_tmp_var (type); tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE, NULL_TREE); g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_in + n_out)); gimple_seq_add_stmt (iseq, g); r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE, NULL_TREE); g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_out)); gimple_seq_add_stmt (iseq, g); for (i = 0; i < 2; i++) { if ((i ? n_in : n_out) == 0) continue; for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND && ((OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_IN) ^ i)) { tree t = OMP_CLAUSE_DECL (c); t = fold_convert (ptr_type_node, t); gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue); r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++), NULL_TREE, NULL_TREE); g = gimple_build_assign (r, t); gimple_seq_add_stmt (iseq, g); } } tree *p = gimple_omp_task_clauses_ptr (stmt); c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND); OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array); OMP_CLAUSE_CHAIN (c) = *p; *p = c; tree clobber = build_constructor (type, NULL); TREE_THIS_VOLATILE (clobber) = 1; g = gimple_build_assign (array, clobber); gimple_seq_add_stmt (oseq, g); } /* Lower the OpenMP parallel or task directive in the current statement in GSI_P. CTX holds context information for the directive. 
*/

static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t;
  gimple stmt = gsi_stmt (*gsi_p);
  gbind *par_bind, *bind, *dep_bind = NULL;
  gimple_seq par_body, olist, ilist, par_olist, par_rlist, par_ilist, new_body;
  location_t loc = gimple_location (stmt);

  clauses = gimple_omp_taskreg_clauses (stmt);
  par_bind
    = as_a <gbind *> (gimple_seq_first_stmt (gimple_omp_body (stmt)));
  par_body = gimple_bind_body (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      /* Detect a parallel region whose body is a single worksharing
	 construct and mark it combined.  */
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
	gimple_omp_parallel_set_combined_p (stmt, true);
    }
  gimple_seq dep_ilist = NULL;
  gimple_seq dep_olist = NULL;
  if (gimple_code (stmt) == GIMPLE_OMP_TASK
      && find_omp_clause (clauses, OMP_CLAUSE_DEPEND))
    {
      /* Tasks with depend clauses get an extra outer bind holding the
	 dependence array setup/teardown.  */
      push_gimplify_context ();
      dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
      lower_depend_clauses (stmt, &dep_ilist, &dep_olist);
    }

  if (ctx->srecord_type)
    create_task_copyfn (as_a <gomp_task *> (stmt), ctx);

  push_gimplify_context ();

  par_olist = NULL;
  par_ilist = NULL;
  par_rlist = NULL;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
  lower_omp (&par_body, ctx);
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
    lower_reduction_clauses (clauses, &par_rlist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl
	= create_tmp_var (ctx->srecord_type ? ctx->srecord_type
			  : ctx->record_type, ".omp_data_o");
      DECL_NAMELESS (ctx->sender_decl) = 1;
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }

  olist = NULL;
  ilist = NULL;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  if (ctx->record_type)
    {
      /* Clobber .omp_data_o after the construct -- it is dead past
	 this point.  */
      tree clobber = build_constructor (TREE_TYPE (ctx->sender_decl), NULL);
      TREE_THIS_VOLATILE (clobber) = 1;
      gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
							clobber));
    }

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */

  new_body = NULL;
  if (ctx->record_type)
    {
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
			   gimple_build_assign (ctx->receiver_decl, t));
    }

  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_rlist);
  if (ctx->cancellable)
    gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
  gimple_omp_set_body (stmt, new_body);

  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
  gimple_bind_add_seq (bind, ilist);
  gimple_bind_add_stmt (bind, stmt);
  gimple_bind_add_seq (bind, olist);

  pop_gimplify_context (NULL);

  if (dep_bind)
    {
      gimple_bind_add_seq (dep_bind, dep_ilist);
      gimple_bind_add_stmt (dep_bind, bind);
      gimple_bind_add_seq (dep_bind, dep_olist);
      pop_gimplify_context (dep_bind);
    }
}

/* Lower the GIMPLE_OMP_TARGET in the current statement
   in GSI_P.  CTX holds context information for the directive.
*/

static void
lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t, c;
  gomp_target *stmt = as_a <gomp_target *> (gsi_stmt (*gsi_p));
  gbind *tgt_bind, *bind;
  gimple_seq tgt_body, olist, ilist, orlist, irlist, new_body;
  location_t loc = gimple_location (stmt);
  bool offloaded, data_region;
  unsigned int map_cnt = 0;

  offloaded = is_gimple_omp_offloaded (stmt);
  switch (gimple_omp_target_kind (stmt))
    {
    case GF_OMP_TARGET_KIND_REGION:
    case GF_OMP_TARGET_KIND_UPDATE:
    case GF_OMP_TARGET_KIND_OACC_PARALLEL:
    case GF_OMP_TARGET_KIND_OACC_KERNELS:
    case GF_OMP_TARGET_KIND_OACC_UPDATE:
    case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
      data_region = false;
      break;
    case GF_OMP_TARGET_KIND_DATA:
    case GF_OMP_TARGET_KIND_OACC_DATA:
      data_region = true;
      break;
    default:
      gcc_unreachable ();
    }

  clauses = gimple_omp_target_clauses (stmt);

  tgt_bind = NULL;
  tgt_body = NULL;
  if (offloaded)
    {
      tgt_bind = gimple_seq_first_stmt_as_a_bind (gimple_omp_body (stmt));
      tgt_body = gimple_bind_body (tgt_bind);
    }
  else if (data_region)
    tgt_body = gimple_omp_body (stmt);

  child_fn = ctx->cb.dst_fn;

  push_gimplify_context ();

  irlist = NULL;
  orlist = NULL;
  if (offloaded
      && is_gimple_omp_oacc (stmt))
    oacc_process_reduction_data (&tgt_body, &irlist, &orlist, ctx);

  /* First pass over the clauses: count the mappings (MAP_CNT) and, for
     offloaded regions, redirect each mapped decl to the received copy
     via DECL_VALUE_EXPR.  */
  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
	tree var, x;

      default:
	break;
      case OMP_CLAUSE_MAP:
#ifdef ENABLE_CHECKING
	/* First check what we're prepared to handle in the following.  */
	switch (OMP_CLAUSE_MAP_KIND (c))
	  {
	  case GOMP_MAP_ALLOC:
	  case GOMP_MAP_TO:
	  case GOMP_MAP_FROM:
	  case GOMP_MAP_TOFROM:
	  case GOMP_MAP_POINTER:
	  case GOMP_MAP_TO_PSET:
	    break;
	  case GOMP_MAP_FORCE_ALLOC:
	  case GOMP_MAP_FORCE_TO:
	  case GOMP_MAP_FORCE_FROM:
	  case GOMP_MAP_FORCE_TOFROM:
	  case GOMP_MAP_FORCE_PRESENT:
	  case GOMP_MAP_FORCE_DEALLOC:
	  case GOMP_MAP_FORCE_DEVICEPTR:
	    /* The GOMP_MAP_FORCE_* kinds are only produced for OpenACC.  */
	    gcc_assert (is_gimple_omp_oacc (stmt));
	    break;
	  default:
	    gcc_unreachable ();
	  }
#endif
	  /* FALLTHRU */
      case OMP_CLAUSE_TO:
      case OMP_CLAUSE_FROM:
	var = OMP_CLAUSE_DECL (c);
	if (!DECL_P (var))
	  {
	    if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
		|| !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
	      map_cnt++;
	    continue;
	  }

	if (DECL_SIZE (var)
	    && TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
	  {
	    /* Variable-sized decl: map the underlying pointer instead.  */
	    tree var2 = DECL_VALUE_EXPR (var);
	    gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
	    var2 = TREE_OPERAND (var2, 0);
	    gcc_assert (DECL_P (var2));
	    var = var2;
	  }

	if (!maybe_lookup_field (var, ctx))
	  continue;

	if (offloaded)
	  {
	    x = build_receiver_ref (var, true, ctx);
	    tree new_var = lookup_decl (var, ctx);
	    if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
		&& !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
		&& TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
	      x = build_simple_mem_ref (x);
	    SET_DECL_VALUE_EXPR (new_var, x);
	    DECL_HAS_VALUE_EXPR_P (new_var) = 1;
	  }
	map_cnt++;
      }

  if (offloaded)
    {
      target_nesting_level++;
      lower_omp (&tgt_body, ctx);
      target_nesting_level--;
    }
  else if (data_region)
    lower_omp (&tgt_body, ctx);

  if (offloaded)
    {
      /* Declare all the variables created by mapping and the variables
	 declared in the scope of the target body.  */
      record_vars_into (ctx->block_vars, child_fn);
      record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
    }

  olist = NULL;
  ilist = NULL;
  if (ctx->record_type)
    {
      /* Build the argument triple handed to the runtime:
	 [0] .omp_data_arr (the sender record),
	 [1] .omp_data_sizes (array of mapping sizes),
	 [2] .omp_data_kinds (array of map kind + alignment bytes).  */
      ctx->sender_decl
	= create_tmp_var (ctx->record_type, ".omp_data_arr");
      DECL_NAMELESS (ctx->sender_decl) = 1;
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      t = make_tree_vec (3);
      TREE_VEC_ELT (t, 0) = ctx->sender_decl;
      TREE_VEC_ELT (t, 1)
	= create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
			  ".omp_data_sizes");
      DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
      TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
      TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
      tree tkind_type;
      int talign_shift;
      if (is_gimple_omp_oacc (stmt))
	{
	  tkind_type = short_unsigned_type_node;
	  talign_shift = 8;
	}
      else
	{
	  tkind_type = unsigned_char_type_node;
	  talign_shift = 3;
	}
      TREE_VEC_ELT (t, 2)
	= create_tmp_var (build_array_type_nelts (tkind_type, map_cnt),
			  ".omp_data_kinds");
      DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
      TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
      TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
      gimple_omp_target_set_data_arg (stmt, t);

      vec<constructor_elt, va_gc> *vsize;
      vec<constructor_elt, va_gc> *vkind;
      vec_alloc (vsize, map_cnt);
      vec_alloc (vkind, map_cnt);
      unsigned int map_idx = 0;

      /* Second pass over the clauses: fill in sender fields and the
	 sizes/kinds constructors, one entry per mapping.  */
      for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
	switch (OMP_CLAUSE_CODE (c))
	  {
	    tree ovar, nc;

	  default:
	    break;
	  case OMP_CLAUSE_MAP:
	  case OMP_CLAUSE_TO:
	  case OMP_CLAUSE_FROM:
	    nc = c;
	    ovar = OMP_CLAUSE_DECL (c);
	    if (!DECL_P (ovar))
	      {
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
		    && OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
		  {
		    gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
					 == get_base_address (ovar));
		    nc = OMP_CLAUSE_CHAIN (c);
		    ovar = OMP_CLAUSE_DECL (nc);
		  }
		else
		  {
		    tree x = build_sender_ref (ovar, ctx);
		    tree v
		      = build_fold_addr_expr_with_type (ovar, ptr_type_node);
		    gimplify_assign (x, v, &ilist);
		    nc = NULL_TREE;
		  }
	      }
	    else
	      {
		if (DECL_SIZE (ovar)
		    && TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
		  {
		    tree ovar2 = DECL_VALUE_EXPR (ovar);
		    gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
		    ovar2 = TREE_OPERAND (ovar2, 0);
		    gcc_assert (DECL_P (ovar2));
		    ovar = ovar2;
		  }
		if (!maybe_lookup_field (ovar, ctx))
		  continue;
	      }

	    unsigned int talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
	    if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
	      talign = DECL_ALIGN_UNIT (ovar);
	    if (nc)
	      {
		tree var = lookup_decl_in_outer_ctx (ovar, ctx);
		tree x = build_sender_ref (ovar, ctx);
		if (maybe_lookup_oacc_reduction (var, ctx))
		  {
		    gcc_checking_assert (offloaded
					 && is_gimple_omp_oacc (stmt));
		    gimplify_assign (x, var, &ilist);
		  }
		else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
			 && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
			 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
			 && TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
		  {
		    gcc_assert (offloaded);
		    tree avar
		      = create_tmp_var (TREE_TYPE (TREE_TYPE (x)));
		    mark_addressable (avar);
		    gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
		    talign = DECL_ALIGN_UNIT (avar);
		    avar = build_fold_addr_expr (avar);
		    gimplify_assign (x, avar, &ilist);
		  }
		else if (is_gimple_reg (var))
		  {
		    /* Register variables are passed through an addressable
		       temporary; copy in before and (if written) back out
		       after the region.  */
		    gcc_assert (offloaded);
		    tree avar = create_tmp_var (TREE_TYPE (var));
		    mark_addressable (avar);
		    enum gomp_map_kind map_kind = OMP_CLAUSE_MAP_KIND (c);
		    if (GOMP_MAP_COPY_TO_P (map_kind)
			|| map_kind == GOMP_MAP_POINTER
			|| map_kind == GOMP_MAP_TO_PSET
			|| map_kind == GOMP_MAP_FORCE_DEVICEPTR)
		      gimplify_assign (avar, var, &ilist);
		    avar = build_fold_addr_expr (avar);
		    gimplify_assign (x, avar, &ilist);
		    if ((GOMP_MAP_COPY_FROM_P (map_kind)
			 || map_kind == GOMP_MAP_FORCE_DEVICEPTR)
			&& !TYPE_READONLY (TREE_TYPE (var)))
		      {
			x = build_sender_ref (ovar, ctx);
			x = build_simple_mem_ref (x);
			gimplify_assign (var, x, &olist);
		      }
		  }
		else
		  {
		    var = build_fold_addr_expr (var);
		    gimplify_assign (x, var, &ilist);
		  }
	      }
	    tree s = OMP_CLAUSE_SIZE (c);
	    if (s == NULL_TREE)
	      s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
	    s = fold_convert (size_type_node, s);
	    tree purpose = size_int (map_idx++);
	    CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
	    if (TREE_CODE (s) != INTEGER_CST)
	      TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;

	    /* Encode the map kind in the low bits and log2 of the
	       alignment above TALIGN_SHIFT.  */
	    unsigned HOST_WIDE_INT tkind;
	    switch (OMP_CLAUSE_CODE (c))
	      {
	      case OMP_CLAUSE_MAP:
		tkind = OMP_CLAUSE_MAP_KIND (c);
		break;
	      case OMP_CLAUSE_TO:
		tkind = GOMP_MAP_TO;
		break;
	      case OMP_CLAUSE_FROM:
		tkind = GOMP_MAP_FROM;
		break;
	      default:
		gcc_unreachable ();
	      }
	    gcc_checking_assert (tkind
				 < (HOST_WIDE_INT_C (1U) << talign_shift));
	    talign = ceil_log2 (talign);
	    tkind |= talign << talign_shift;
	    gcc_checking_assert (tkind
				 <= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
	    CONSTRUCTOR_APPEND_ELT (vkind, purpose,
				    build_int_cstu (tkind_type, tkind));
	    if (nc && nc != c)
	      c = nc;
	  }
      gcc_assert (map_idx == map_cnt);

      DECL_INITIAL (TREE_VEC_ELT (t, 1))
	= build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
      DECL_INITIAL (TREE_VEC_ELT (t, 2))
	= build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
      if (!TREE_STATIC (TREE_VEC_ELT (t, 1)))
	{
	  /* Non-constant sizes: initialize the sizes array at runtime and
	     clobber it afterwards.  */
	  gimple_seq initlist = NULL;
	  force_gimple_operand (build1 (DECL_EXPR, void_type_node,
					TREE_VEC_ELT (t, 1)),
				&initlist, true, NULL_TREE);
	  gimple_seq_add_seq (&ilist, initlist);

	  tree clobber = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)),
					    NULL);
	  TREE_THIS_VOLATILE (clobber) = 1;
	  gimple_seq_add_stmt (&olist,
			       gimple_build_assign (TREE_VEC_ELT (t, 1),
						    clobber));
	}

      tree clobber = build_constructor (ctx->record_type, NULL);
      TREE_THIS_VOLATILE (clobber) = 1;
      gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
							clobber));
    }

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */

  new_body = NULL;
  if (offloaded
      && ctx->record_type)
    {
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.
*/
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
			   gimple_build_assign (ctx->receiver_decl, t));
    }
  if (offloaded)
    {
      gimple_seq_add_seq (&new_body, tgt_body);
      new_body = maybe_catch_exception (new_body);
    }
  else if (data_region)
    new_body = tgt_body;
  if (offloaded || data_region)
    {
      gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
      gimple_omp_set_body (stmt, new_body);
    }

  bind = gimple_build_bind (NULL, NULL,
			    tgt_bind ? gimple_bind_block (tgt_bind)
				     : NULL_TREE);
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_seq (bind, irlist);
  gimple_bind_add_seq (bind, ilist);
  gimple_bind_add_stmt (bind, stmt);
  gimple_bind_add_seq (bind, olist);
  gimple_bind_add_seq (bind, orlist);

  pop_gimplify_context (NULL);
}

/* Expand code for an OpenMP teams directive.  */

static void
lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gomp_teams *teams_stmt = as_a <gomp_teams *> (gsi_stmt (*gsi_p));
  push_gimplify_context ();

  tree block = make_node (BLOCK);
  gbind *bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  gimple_seq bind_body = NULL;
  gimple_seq dlist = NULL;
  gimple_seq olist = NULL;

  /* num_teams and thread_limit default to 0 ("implementation
     defined") when the respective clause is absent.  */
  tree num_teams = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
				    OMP_CLAUSE_NUM_TEAMS);
  if (num_teams == NULL_TREE)
    num_teams = build_int_cst (unsigned_type_node, 0);
  else
    {
      num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
      num_teams = fold_convert (unsigned_type_node, num_teams);
      gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
    }
  tree thread_limit = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
				       OMP_CLAUSE_THREAD_LIMIT);
  if (thread_limit == NULL_TREE)
    thread_limit = build_int_cst (unsigned_type_node, 0);
  else
    {
      thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
      thread_limit = fold_convert (unsigned_type_node, thread_limit);
      gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
		     fb_rvalue);
    }

  lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
			   &bind_body, &dlist, ctx, NULL);
  lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
  lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist, ctx);
  gimple_seq_add_stmt (&bind_body, teams_stmt);

  /* Emit the GOMP_teams (num_teams, thread_limit) runtime call.  */
  location_t loc = gimple_location (teams_stmt);
  tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
  gimple call = gimple_build_call (decl, 2, num_teams, thread_limit);
  gimple_set_location (call, loc);
  gimple_seq_add_stmt (&bind_body, call);

  gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
  gimple_omp_set_body (teams_stmt, NULL);
  gimple_seq_add_seq (&bind_body, olist);
  gimple_seq_add_seq (&bind_body, dlist);
  gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
  gimple_bind_set_body (bind, bind_body);

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;
}

/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
   of OMP context, but with task_shared_vars set.  */

static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees, void *data)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (task_shared_vars
      && DECL_P (t)
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.
*/
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}

/* Lower the statement at GSI_P, dispatching on its gimple code; CTX is
   the enclosing OMP context (NULL outside of any OMP construct).  */

static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;
  gcall *call_stmt;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (seen_error () && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	gcond *cond_stmt = as_a <gcond *> (stmt);
	if ((ctx || task_shared_vars)
	    && (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
			   lower_omp_regimplify_p,
			   ctx ? NULL : &wi, NULL)
		|| walk_tree (gimple_cond_rhs_ptr (cond_stmt),
			      lower_omp_regimplify_p,
			      ctx ? NULL : &wi, NULL)))
	  gimple_regimplify_operands (cond_stmt, gsi_p);
      }
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler_ptr (as_a <gcatch *> (stmt)), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval_ptr (stmt), ctx);
      lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
      break;
    case GIMPLE_TRANSACTION:
      lower_omp (gimple_transaction_body_ptr (
		   as_a <gtransaction *> (stmt)),
		 ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body_ptr (as_a <gbind *> (stmt)), ctx);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      if (ctx->cancellable)
	ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      if (ctx->cancellable)
	ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      if (ctx->cancellable)
	ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_TASKGROUP:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_taskgroup (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
	  && walk_tree (gimple_omp_atomic_load_rhs_ptr (
			  as_a <gomp_atomic_load *> (stmt)),
			lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    case GIMPLE_OMP_TARGET:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_target (gsi_p, ctx);
      break;
    case GIMPLE_OMP_TEAMS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_teams (gsi_p, ctx);
      break;
    case GIMPLE_CALL:
      tree fndecl;
      call_stmt = as_a <gcall *> (stmt);
      fndecl = gimple_call_fndecl (call_stmt);
      if (fndecl
	  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
	switch (DECL_FUNCTION_CODE (fndecl))
	  {
	  case BUILT_IN_GOMP_BARRIER:
	    if (ctx == NULL)
	      break;
	    /* FALLTHRU */
	  case BUILT_IN_GOMP_CANCEL:
	  case BUILT_IN_GOMP_CANCELLATION_POINT:
	    /* In a cancellable context, rewrite the call to the
	       cancellable variant and branch to the cancel label when it
	       returns true.  */
	    omp_context *cctx;
	    cctx = ctx;
	    if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
	      cctx = cctx->outer;
	    gcc_assert (gimple_call_lhs (call_stmt) == NULL_TREE);
	    if (!cctx->cancellable)
	      {
		if (DECL_FUNCTION_CODE (fndecl)
		    == BUILT_IN_GOMP_CANCELLATION_POINT)
		  {
		    stmt = gimple_build_nop ();
		    gsi_replace (gsi_p, stmt, false);
		  }
		break;
	      }
	    if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
	      {
		fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
		gimple_call_set_fndecl (call_stmt, fndecl);
		gimple_call_set_fntype (call_stmt, TREE_TYPE (fndecl));
	      }
	    tree lhs;
	    lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (fndecl)));
	    gimple_call_set_lhs (call_stmt, lhs);
	    tree fallthru_label;
	    fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
	    gimple g;
	    g = gimple_build_label (fallthru_label);
	    gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
	    g = gimple_build_cond (NE_EXPR, lhs,
				   fold_convert (TREE_TYPE (lhs),
						 boolean_false_node),
				   cctx->cancel_label, fallthru_label);
	    gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
	    break;
	  default:
	    break;
	  }
      /* FALLTHRU */
    default:
      if ((ctx || task_shared_vars)
	  && walk_gimple_op (stmt, lower_omp_regimplify_p,
			     ctx ? NULL : &wi))
	{
	  /* Just remove clobbers, this should happen only if we have
	     "privatized" local addressable variables in SIMD regions,
	     the clobber isn't needed in that case and gimplifying address
	     of the ARRAY_REF into a pointer and creating MEM_REF based
	     clobber would create worse code than we get with the clobber
	     dropped.  */
	  if (gimple_clobber_p (stmt))
	    {
	      gsi_replace (gsi_p, gimple_build_nop (), true);
	      break;
	    }
	  gimple_regimplify_operands (stmt, gsi_p);
	}
      break;
    }
}

/* Lower every statement of *BODY under context CTX.  */

static void
lower_omp (gimple_seq *body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  /* During gimplification, we haven't folded statements inside offloading
     regions (gimplify.c:maybe_fold_stmt); do that now.  */
  if (target_nesting_level)
    for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
      fold_stmt (&gsi);
  input_location = saved_location;
}

/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;
  int i;
  omp_context *ctx;

  /* This pass always runs, to provide PROP_gimple_lomp.
     But often, there is nothing to do.
*/ if (flag_cilkplus == 0 && flag_openacc == 0 && flag_openmp == 0 && flag_openmp_simd == 0) return 0; all_contexts = splay_tree_new (splay_tree_compare_pointers, 0, delete_omp_context); body = gimple_body (current_function_decl); scan_omp (&body, NULL); gcc_assert (taskreg_nesting_level == 0); FOR_EACH_VEC_ELT (taskreg_contexts, i, ctx) finish_taskreg_scan (ctx); taskreg_contexts.release (); if (all_contexts->root) { if (task_shared_vars) push_gimplify_context (); lower_omp (&body, NULL); if (task_shared_vars) pop_gimplify_context (NULL); } if (all_contexts) { splay_tree_delete (all_contexts); all_contexts = NULL; } BITMAP_FREE (task_shared_vars); return 0; } namespace { const pass_data pass_data_lower_omp = { GIMPLE_PASS, /* type */ "omplower", /* name */ OPTGROUP_NONE, /* optinfo_flags */ TV_NONE, /* tv_id */ PROP_gimple_any, /* properties_required */ PROP_gimple_lomp, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ 0, /* todo_flags_finish */ }; class pass_lower_omp : public gimple_opt_pass { public: pass_lower_omp (gcc::context *ctxt) : gimple_opt_pass (pass_data_lower_omp, ctxt) {} /* opt_pass methods: */ virtual unsigned int execute (function *) { return execute_lower_omp (); } }; // class pass_lower_omp } // anon namespace gimple_opt_pass * make_pass_lower_omp (gcc::context *ctxt) { return new pass_lower_omp (ctxt); } /* The following is a utility to diagnose structured block violations. It is not part of the "omplower" pass, as that's invoked too late. It should be invoked by the respective front ends after gimplification. */ static splay_tree all_labels; /* Check for mismatched contexts and generate an error if needed. Return true if an error is detected. 
*/
static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
	       gimple branch_ctx, gimple label_ctx)
{
  gcc_checking_assert (!branch_ctx || is_gimple_omp (branch_ctx));
  gcc_checking_assert (!label_ctx || is_gimple_omp (label_ctx));

  /* Branch and label in the same construct: nothing to diagnose.  */
  if (label_ctx == branch_ctx)
    return false;

  /* Pick the language family to name in the diagnostic.  */
  const char* kind = NULL;

  if (flag_cilkplus)
    {
      if ((branch_ctx
	   && gimple_code (branch_ctx) == GIMPLE_OMP_FOR
	   && gimple_omp_for_kind (branch_ctx) == GF_OMP_FOR_KIND_CILKSIMD)
	  || (label_ctx
	      && gimple_code (label_ctx) == GIMPLE_OMP_FOR
	      && gimple_omp_for_kind (label_ctx) == GF_OMP_FOR_KIND_CILKSIMD))
	kind = "Cilk Plus";
    }
  if (flag_openacc)
    {
      if ((branch_ctx && is_gimple_omp_oacc (branch_ctx))
	  || (label_ctx && is_gimple_omp_oacc (label_ctx)))
	{
	  gcc_checking_assert (kind == NULL);
	  kind = "OpenACC";
	}
    }
  if (kind == NULL)
    {
      gcc_checking_assert (flag_openmp);
      kind = "OpenMP";
    }

  /*
     Previously we kept track of the label's entire context in diagnose_sb_[12]
     so we could traverse it and issue a correct "exit" or "enter" error
     message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but there is
     no easy counterpart in gimple tuples.  It seems like far too much work
     for issuing exit/enter error messages.  If someone really misses the
     distinct error message... patches welcome.
   */

#if 0
  /* Try to avoid confusing the user by producing an error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
	{
	  if (TREE_VALUE (label_ctx) == branch_ctx)
	    {
	      exit_p = false;
	      break;
	    }
	  label_ctx = TREE_CHAIN (label_ctx);
	}
    }

  if (exit_p)
    error ("invalid exit from %s structured block", kind);
  else
    error ("invalid entry to %s structured block", kind);
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to %s structured block", kind);
  else
    {
      /* Otherwise, be vague and lazy, but efficient.  */
      error ("invalid branch to/from %s structured block", kind);
    }

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}

/* Pass 1: Create a minimal tree of structured blocks, and record
   where each label is found.  */

static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
    	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_OMP_TARGET:
    case GIMPLE_OMP_TEAMS:
    case GIMPLE_OMP_TASKGROUP:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      splay_tree_insert (all_labels,
			 (splay_tree_key) gimple_label_label (
					    as_a <glabel *> (stmt)),
			 (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.
*/

static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  /* All cases below do their own sub-statement walking.  */
  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_OMP_TARGET:
    case GIMPLE_OMP_TEAMS:
    case GIMPLE_OMP_TASKGROUP:
      /* Descend with this construct as the enclosing context.  */
      wi->info = stmt;
      walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2,
			   NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
			   diagnose_sb_2, NULL, wi);
      walk_gimple_seq_mod (gimple_omp_body_ptr (stmt),
			   diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_COND:
      {
	gcond *cond_stmt = as_a <gcond *> (stmt);
	/* Check both outgoing labels of the conditional against the
	   context recorded for them in pass 1.  */
	tree lab = gimple_cond_true_label (cond_stmt);
	if (lab)
	  {
	    n = splay_tree_lookup (all_labels,
				   (splay_tree_key) lab);
	    diagnose_sb_0 (gsi_p, context,
			   n ? (gimple) n->value : NULL);
	  }
	lab = gimple_cond_false_label (cond_stmt);
	if (lab)
	  {
	    n = splay_tree_lookup (all_labels,
				   (splay_tree_key) lab);
	    diagnose_sb_0 (gsi_p, context,
			   n ? (gimple) n->value : NULL);
	  }
      }
      break;

    case GIMPLE_GOTO:
      {
	tree lab = gimple_goto_dest (stmt);
	/* Only literal label destinations can be looked up here.  */
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;

	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
	gswitch *switch_stmt = as_a <gswitch *> (stmt);
	unsigned int i;
	for (i = 0; i < gimple_switch_num_labels (switch_stmt); ++i)
	  {
	    tree lab = CASE_LABEL (gimple_switch_label (switch_stmt, i));
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    /* One diagnostic per statement is enough.  */
	    if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
	      break;
	  }
      }
      break;

    case GIMPLE_RETURN:
      /* A return from inside a structured block leaves every context.  */
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Called from tree-cfg.c::make_edges to create cfg edges for all relevant
   GIMPLE_* codes.  BB is the block whose last statement is inspected;
   *REGION tracks the innermost open OMP region and is updated (with
   *REGION_IDX mirroring its entry block index) as regions open and
   close.  Returns true if BB should get a fallthru edge.  */

bool
make_gimple_omp_edges (basic_block bb, struct omp_region **region,
		       int *region_idx)
{
  gimple last = last_stmt (bb);
  enum gimple_code code = gimple_code (last);
  struct omp_region *cur_region = *region;
  bool fallthru = false;

  switch (code)
    {
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_TEAMS:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_TASKGROUP:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_OMP_SECTION:
      cur_region = new_omp_region (bb, code, cur_region);
      fallthru = true;
      break;

    case GIMPLE_OMP_TARGET:
      cur_region = new_omp_region (bb, code, cur_region);
      fallthru = true;
      switch (gimple_omp_target_kind (last))
	{
	case GF_OMP_TARGET_KIND_REGION:
	case GF_OMP_TARGET_KIND_DATA:
	case GF_OMP_TARGET_KIND_OACC_PARALLEL:
	case GF_OMP_TARGET_KIND_OACC_KERNELS:
	case GF_OMP_TARGET_KIND_OACC_DATA:
	  break;
	case GF_OMP_TARGET_KIND_UPDATE:
	case GF_OMP_TARGET_KIND_OACC_UPDATE:
	case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
	  /* Standalone directives: close the region opened just
	     above again immediately.  */
	  cur_region = cur_region->outer;
	  break;
	default:
	  gcc_unreachable ();
	}
      break;

    case GIMPLE_OMP_SECTIONS:
      cur_region = new_omp_region (bb, code, cur_region);
      fallthru = true;
      break;

    case GIMPLE_OMP_SECTIONS_SWITCH:
      fallthru = false;
      break;

    case GIMPLE_OMP_ATOMIC_LOAD:
    case GIMPLE_OMP_ATOMIC_STORE:
      fallthru = true;
      break;

    case GIMPLE_OMP_RETURN:
      /* In the case of a GIMPLE_OMP_SECTION, the edge will go
	 somewhere other than the next block.  This will be
	 created later.  */
      cur_region->exit = bb;
      fallthru = cur_region->type != GIMPLE_OMP_SECTION;
      cur_region = cur_region->outer;
      break;

    case GIMPLE_OMP_CONTINUE:
      cur_region->cont = bb;
      switch (cur_region->type)
	{
	case GIMPLE_OMP_FOR:
	  /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
	     succs edges as abnormal to prevent splitting
	     them.  */
	  single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
	  /* Make the loopback edge.  */
	  make_edge (bb, single_succ (cur_region->entry),
		     EDGE_ABNORMAL);

	  /* Create an edge from GIMPLE_OMP_FOR to exit, which
	     corresponds to the case that the body of the loop is
	     not executed at all.  */
	  make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
	  make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
	  fallthru = false;
	  break;

	case GIMPLE_OMP_SECTIONS:
	  /* Wire up the edges into and out of the nested sections.  */
	  {
	    basic_block switch_bb = single_succ (cur_region->entry);

	    struct omp_region *i;
	    for (i = cur_region->inner; i ; i = i->next)
	      {
		gcc_assert (i->type == GIMPLE_OMP_SECTION);
		make_edge (switch_bb, i->entry, 0);
		make_edge (i->exit, bb, EDGE_FALLTHRU);
	      }

	    /* Make the loopback edge to the block with
	       GIMPLE_OMP_SECTIONS_SWITCH.  */
	    make_edge (bb, switch_bb, 0);

	    /* Make the edge from the switch to exit.  */
	    make_edge (switch_bb, bb->next_bb, 0);
	    fallthru = false;
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

    default:
      gcc_unreachable ();
    }

  /* Report the region change back to the caller.  */
  if (*region != cur_region)
    {
      *region = cur_region;
      if (cur_region)
	*region_idx = cur_region->entry->index;
      else
	*region_idx = 0;
    }

  return fallthru;
}

/* Run the two diagnostic walks over the current function body:
   pass 1 records label contexts into ALL_LABELS, pass 2 checks each
   branch against them.  Entry point of the diagnose_omp_blocks pass.  */

static unsigned int
diagnose_omp_structured_block_errors (void)
{
  struct walk_stmt_info wi;
  gimple_seq body = gimple_body (current_function_decl);

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);

  /* Pass 2 may have replaced statements; store the body back.  */
  gimple_set_body (current_function_decl, body);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  return 0;
}

namespace {

const pass_data pass_data_diagnose_omp_blocks =
{
  GIMPLE_PASS, /* type */
  "*diagnose_omp_blocks", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_gimple_any, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_diagnose_omp_blocks : public gimple_opt_pass
{
public:
  pass_diagnose_omp_blocks (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
  {
    /* Run for any of the front-end features that need these
       diagnostics.  */
    return flag_cilkplus || flag_openacc || flag_openmp;
  }
  virtual unsigned int execute (function *)
    {
      return diagnose_omp_structured_block_errors ();
    }

}; // class pass_diagnose_omp_blocks

} // anon namespace

gimple_opt_pass *
make_pass_diagnose_omp_blocks (gcc::context *ctxt)
{
  return new pass_diagnose_omp_blocks (ctxt);
}

/* SIMD clone supporting code.  */

/* Allocate a fresh `simd_clone' and return it.  NARGS is the number
   of arguments to reserve space for.
*/

static struct cgraph_simd_clone *
simd_clone_struct_alloc (int nargs)
{
  struct cgraph_simd_clone *clone_info;
  /* The per-argument descriptors trail the struct itself.  */
  size_t len = (sizeof (struct cgraph_simd_clone)
		+ nargs * sizeof (struct cgraph_simd_clone_arg));
  clone_info = (struct cgraph_simd_clone *)
	       ggc_internal_cleared_alloc (len);
  return clone_info;
}

/* Make a copy of the `struct cgraph_simd_clone' in FROM to TO.  */

static inline void
simd_clone_struct_copy (struct cgraph_simd_clone *to,
			struct cgraph_simd_clone *from)
{
  memcpy (to, from, (sizeof (struct cgraph_simd_clone)
		     + ((from->nargs - from->inbranch)
			* sizeof (struct cgraph_simd_clone_arg))));
}

/* Return vector of parameter types of function FNDECL.  This uses
   TYPE_ARG_TYPES if available, otherwise falls back to types of
   DECL_ARGUMENTS types.  */

vec<tree>
simd_clone_vector_of_formal_parm_types (tree fndecl)
{
  if (TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
    return ipa_get_vector_of_formal_parm_types (TREE_TYPE (fndecl));
  vec<tree> args = ipa_get_vector_of_formal_parms (fndecl);
  unsigned int i;
  tree arg;

  FOR_EACH_VEC_ELT (args, i, arg)
    args[i] = TREE_TYPE (args[i]);
  return args;
}

/* Given a simd function in NODE, extract the simd specific
   information from the OMP clauses passed in CLAUSES, and return the
   struct cgraph_simd_clone * if it should be cloned.
   *INBRANCH_SPECIFIED is set to TRUE if the `inbranch' or
   `notinbranch' clause specified, otherwise set to FALSE.  Returns
   NULL (after warning) for linear steps that cannot be handled.  */

static struct cgraph_simd_clone *
simd_clone_clauses_extract (struct cgraph_node *node, tree clauses,
			    bool *inbranch_specified)
{
  vec<tree> args = simd_clone_vector_of_formal_parm_types (node->decl);
  tree t;
  int n;
  *inbranch_specified = false;

  n = args.length ();
  /* Ignore the trailing void_type_node of a (...) parameter list.  */
  if (n > 0 && args.last () == void_type_node)
    n--;

  /* To distinguish from an OpenMP simd clone, Cilk Plus functions to
     be cloned have a distinctive artificial label in addition to
     "omp declare simd".  */
  bool cilk_clone
    = (flag_cilkplus
       && lookup_attribute ("cilk simd function",
			    DECL_ATTRIBUTES (node->decl)));

  /* Allocate one more than needed just in case this is an in-branch
     clone which will require a mask argument.  */
  struct cgraph_simd_clone *clone_info = simd_clone_struct_alloc (n + 1);
  clone_info->nargs = n;
  clone_info->cilk_elemental = cilk_clone;

  if (!clauses)
    {
      args.release ();
      return clone_info;
    }
  clauses = TREE_VALUE (clauses);
  if (!clauses || TREE_CODE (clauses) != OMP_CLAUSE)
    return clone_info;

  for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
    {
      switch (OMP_CLAUSE_CODE (t))
	{
	case OMP_CLAUSE_INBRANCH:
	  clone_info->inbranch = 1;
	  *inbranch_specified = true;
	  break;
	case OMP_CLAUSE_NOTINBRANCH:
	  clone_info->inbranch = 0;
	  *inbranch_specified = true;
	  break;
	case OMP_CLAUSE_SIMDLEN:
	  clone_info->simdlen
	    = TREE_INT_CST_LOW (OMP_CLAUSE_SIMDLEN_EXPR (t));
	  break;
	case OMP_CLAUSE_LINEAR:
	  {
	    tree decl = OMP_CLAUSE_DECL (t);
	    tree step = OMP_CLAUSE_LINEAR_STEP (t);
	    int argno = TREE_INT_CST_LOW (decl);
	    if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (t))
	      {
		/* Variable stride: linear_step is the index of the
		   uniform argument holding the step.  */
		clone_info->args[argno].arg_type
		  = SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP;
		clone_info->args[argno].linear_step = tree_to_shwi (step);
		gcc_assert (clone_info->args[argno].linear_step >= 0
			    && clone_info->args[argno].linear_step < n);
	      }
	    else
	      {
		if (POINTER_TYPE_P (args[argno]))
		  step = fold_convert (ssizetype, step);
		if (!tree_fits_shwi_p (step))
		  {
		    warning_at (OMP_CLAUSE_LOCATION (t), 0,
				"ignoring large linear step");
		    args.release ();
		    return NULL;
		  }
		else if (integer_zerop (step))
		  {
		    warning_at (OMP_CLAUSE_LOCATION (t), 0,
				"ignoring zero linear step");
		    args.release ();
		    return NULL;
		  }
		else
		  {
		    clone_info->args[argno].arg_type
		      = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP;
		    clone_info->args[argno].linear_step
		      = tree_to_shwi (step);
		  }
	      }
	    break;
	  }
	case OMP_CLAUSE_UNIFORM:
	  {
	    tree decl = OMP_CLAUSE_DECL (t);
	    int argno = tree_to_uhwi (decl);
	    clone_info->args[argno].arg_type
	      = SIMD_CLONE_ARG_TYPE_UNIFORM;
	    break;
	  }
	case OMP_CLAUSE_ALIGNED:
	  {
	    tree decl = OMP_CLAUSE_DECL (t);
	    int argno = tree_to_uhwi (decl);
	    clone_info->args[argno].alignment
	      = TREE_INT_CST_LOW (OMP_CLAUSE_ALIGNED_ALIGNMENT (t));
	    break;
	  }
	default:
	  break;
	}
    }
  args.release ();
  return clone_info;
}

/* Given a SIMD clone in NODE, calculate the characteristic data
   type and return the corresponding type.  The characteristic data
   type is computed as described in the Intel Vector ABI.  */

static tree
simd_clone_compute_base_data_type (struct cgraph_node *node,
				   struct cgraph_simd_clone *clone_info)
{
  tree type = integer_type_node;
  tree fndecl = node->decl;

  /* a) For non-void function, the characteristic data type is the
        return type.  */
  if (TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE)
    type = TREE_TYPE (TREE_TYPE (fndecl));

  /* b) If the function has any non-uniform, non-linear parameters,
        then the characteristic data type is the type of the first
        such parameter.  */
  else
    {
      vec<tree> map = simd_clone_vector_of_formal_parm_types (fndecl);
      for (unsigned int i = 0; i < clone_info->nargs; ++i)
	if (clone_info->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
	  {
	    type = map[i];
	    break;
	  }
      map.release ();
    }

  /* c) If the characteristic data type determined by a) or b) above
        is struct, union, or class type which is pass-by-value (except
        for the type that maps to the built-in complex data type), the
        characteristic data type is int.  */
  if (RECORD_OR_UNION_TYPE_P (type)
      && !aggregate_value_p (type, NULL)
      && TREE_CODE (type) != COMPLEX_TYPE)
    return integer_type_node;

  /* d) If none of the above three classes is applicable, the
        characteristic data type is int.  */

  return type;

  /* e) For Intel Xeon Phi native and offload compilation, if the
        resulting characteristic data type is 8-bit or 16-bit integer
        data type, the characteristic data type is int.  */
  /* Well, we don't handle Xeon Phi yet.  */
}

/* Build the "_ZGV<isa><mask><simdlen><argtags>_<origname>" mangled
   name for the clone described by CLONE_INFO of NODE.  Returns the
   identifier, or NULL_TREE if a clone with that name already exists
   on NODE's simd_clones list.  */

static tree
simd_clone_mangle (struct cgraph_node *node,
		   struct cgraph_simd_clone *clone_info)
{
  char vecsize_mangle = clone_info->vecsize_mangle;
  char mask = clone_info->inbranch ? 'M' : 'N';
  unsigned int simdlen = clone_info->simdlen;
  unsigned int n;
  pretty_printer pp;

  gcc_assert (vecsize_mangle && simdlen);

  pp_string (&pp, "_ZGV");
  pp_character (&pp, vecsize_mangle);
  pp_character (&pp, mask);
  pp_decimal_int (&pp, simdlen);

  for (n = 0; n < clone_info->nargs; ++n)
    {
      struct cgraph_simd_clone_arg arg = clone_info->args[n];

      if (arg.arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM)
	pp_character (&pp, 'u');
      else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
	{
	  gcc_assert (arg.linear_step != 0);
	  pp_character (&pp, 'l');
	  if (arg.linear_step > 1)
	    pp_unsigned_wide_integer (&pp, arg.linear_step);
	  else if (arg.linear_step < 0)
	    {
	      pp_character (&pp, 'n');
	      pp_unsigned_wide_integer (&pp, (-(unsigned HOST_WIDE_INT)
					      arg.linear_step));
	    }
	}
      else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP)
	{
	  pp_character (&pp, 's');
	  pp_unsigned_wide_integer (&pp, arg.linear_step);
	}
      else
	pp_character (&pp, 'v');
      if (arg.alignment)
	{
	  pp_character (&pp, 'a');
	  pp_decimal_int (&pp, arg.alignment);
	}
    }

  pp_underscore (&pp);
  const char *str = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node->decl));
  /* Skip the '*' prefix of an already user-assembled name.  */
  if (*str == '*')
    ++str;
  pp_string (&pp, str);
  str = pp_formatted_text (&pp);

  /* If there already is a SIMD clone with the same mangled name, don't
     add another one.  This can happen e.g. for
     #pragma omp declare simd
     #pragma omp declare simd simdlen(8)
     int foo (int, int);
     if the simdlen is assumed to be 8 for the first one, etc.  */
  for (struct cgraph_node *clone = node->simd_clones; clone;
       clone = clone->simdclone->next_clone)
    if (strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (clone->decl)),
		str) == 0)
      return NULL_TREE;

  return get_identifier (str);
}

/* Create a simd clone of OLD_NODE and return it.
*/

static struct cgraph_node *
simd_clone_create (struct cgraph_node *old_node)
{
  struct cgraph_node *new_node;
  if (old_node->definition)
    {
      /* For a defined function, clone the body as well.  */
      if (!old_node->has_gimple_body_p ())
	return NULL;
      old_node->get_body ();
      new_node = old_node->create_version_clone_with_body (vNULL, NULL, NULL,
							   false, NULL, NULL,
							   "simdclone");
    }
  else
    {
      /* Declaration only: duplicate the decl by hand.  */
      tree old_decl = old_node->decl;
      tree new_decl = copy_node (old_node->decl);
      DECL_NAME (new_decl) = clone_function_name (old_decl, "simdclone");
      SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
      SET_DECL_RTL (new_decl, NULL);
      DECL_STATIC_CONSTRUCTOR (new_decl) = 0;
      DECL_STATIC_DESTRUCTOR (new_decl) = 0;
      new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
      symtab->call_cgraph_insertion_hooks (new_node);
    }
  if (new_node == NULL)
    return new_node;

  TREE_PUBLIC (new_node->decl) = TREE_PUBLIC (old_node->decl);

  /* The function cgraph_function_versioning () will force the new
     symbol local.  Undo this, and inherit external visibility from
     the old node.  */
  new_node->local.local = old_node->local.local;
  new_node->externally_visible = old_node->externally_visible;

  return new_node;
}

/* Adjust the return type of the given function to its appropriate
   vector counterpart.  Returns a simd array to be used throughout the
   function as a return value.  */

static tree
simd_clone_adjust_return_type (struct cgraph_node *node)
{
  tree fndecl = node->decl;
  tree orig_rettype = TREE_TYPE (TREE_TYPE (fndecl));
  unsigned int veclen;
  tree t;

  /* Adjust the function return type.  */
  if (orig_rettype == void_type_node)
    return NULL_TREE;
  TREE_TYPE (fndecl) = build_distinct_type_copy (TREE_TYPE (fndecl));
  t = TREE_TYPE (TREE_TYPE (fndecl));
  if (INTEGRAL_TYPE_P (t) || POINTER_TYPE_P (t))
    veclen = node->simdclone->vecsize_int;
  else
    veclen = node->simdclone->vecsize_float;
  veclen /= GET_MODE_BITSIZE (TYPE_MODE (t));
  if (veclen > node->simdclone->simdlen)
    veclen = node->simdclone->simdlen;
  if (POINTER_TYPE_P (t))
    t = pointer_sized_int_node;
  if (veclen == node->simdclone->simdlen)
    t = build_vector_type (t, node->simdclone->simdlen);
  else
    {
      /* simdlen exceeds one vector: use an array of vectors.  */
      t = build_vector_type (t, veclen);
      t = build_array_type_nelts (t, node->simdclone->simdlen / veclen);
    }
  TREE_TYPE (TREE_TYPE (fndecl)) = t;
  if (!node->definition)
    return NULL_TREE;

  t = DECL_RESULT (fndecl);
  /* Adjust the DECL_RESULT.  */
  gcc_assert (TREE_TYPE (t) != void_type_node);
  TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (fndecl));
  relayout_decl (t);

  tree atype = build_array_type_nelts (orig_rettype,
				       node->simdclone->simdlen);
  if (veclen != node->simdclone->simdlen)
    return build1 (VIEW_CONVERT_EXPR, atype, t);

  /* Set up a SIMD array to use as the return value.  */
  tree retval = create_tmp_var_raw (atype, "retval");
  gimple_add_tmp_var (retval);
  return retval;
}

/* Each vector argument has a corresponding array to be used locally
   as part of the eventual loop.  Create such temporary array and
   return it.

   PREFIX is the prefix to be used for the temporary.
   TYPE is the inner element type.
   SIMDLEN is the number of elements.  */

static tree
create_tmp_simd_array (const char *prefix, tree type, int simdlen)
{
  tree atype = build_array_type_nelts (type, simdlen);
  tree avar = create_tmp_var_raw (atype, prefix);
  gimple_add_tmp_var (avar);
  return avar;
}

/* Modify the function argument types to their corresponding vector
   counterparts if appropriate.  Also, create one array for each simd
   argument to be used locally when using the function arguments as
   part of the loop.

   NODE is the function whose arguments are to be adjusted.

   Returns an adjustment vector that will be filled describing how the
   argument types will be adjusted.  */

static ipa_parm_adjustment_vec
simd_clone_adjust_argument_types (struct cgraph_node *node)
{
  vec<tree> args;
  ipa_parm_adjustment_vec adjustments;

  if (node->definition)
    args = ipa_get_vector_of_formal_parms (node->decl);
  else
    args = simd_clone_vector_of_formal_parm_types (node->decl);
  adjustments.create (args.length ());
  unsigned i, j, veclen;
  struct ipa_parm_adjustment adj;
  for (i = 0; i < node->simdclone->nargs; ++i)
    {
      memset (&adj, 0, sizeof (adj));
      tree parm = args[i];
      tree parm_type = node->definition ? TREE_TYPE (parm) : parm;
      adj.base_index = i;
      adj.base = parm;

      node->simdclone->args[i].orig_arg
	= node->definition ? parm : NULL_TREE;
      node->simdclone->args[i].orig_type = parm_type;

      if (node->simdclone->args[i].arg_type != SIMD_CLONE_ARG_TYPE_VECTOR)
	{
	  /* No adjustment necessary for scalar arguments.  */
	  adj.op = IPA_PARM_OP_COPY;
	}
      else
	{
	  if (INTEGRAL_TYPE_P (parm_type) || POINTER_TYPE_P (parm_type))
	    veclen = node->simdclone->vecsize_int;
	  else
	    veclen = node->simdclone->vecsize_float;
	  veclen /= GET_MODE_BITSIZE (TYPE_MODE (parm_type));
	  if (veclen > node->simdclone->simdlen)
	    veclen = node->simdclone->simdlen;
	  adj.arg_prefix = "simd";
	  if (POINTER_TYPE_P (parm_type))
	    adj.type = build_vector_type (pointer_sized_int_node, veclen);
	  else
	    adj.type = build_vector_type (parm_type, veclen);
	  node->simdclone->args[i].vector_type = adj.type;
	  /* If simdlen needs several vectors, add IPA_PARM_OP_NEW
	     entries for the extra vector parameters.  */
	  for (j = veclen; j < node->simdclone->simdlen; j += veclen)
	    {
	      adjustments.safe_push (adj);
	      if (j == veclen)
		{
		  memset (&adj, 0, sizeof (adj));
		  adj.op = IPA_PARM_OP_NEW;
		  adj.arg_prefix = "simd";
		  adj.base_index = i;
		  adj.type = node->simdclone->args[i].vector_type;
		}
	    }

	  if (node->definition)
	    node->simdclone->args[i].simd_array
	      = create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm)),
				       parm_type,
				       node->simdclone->simdlen);
	}
      adjustments.safe_push (adj);
    }

  if (node->simdclone->inbranch)
    {
      tree base_type
	= simd_clone_compute_base_data_type (node->simdclone->origin,
					     node->simdclone);

      memset (&adj, 0, sizeof (adj));
      adj.op = IPA_PARM_OP_NEW;
      adj.arg_prefix = "mask";

      adj.base_index = i;
      if (INTEGRAL_TYPE_P (base_type) || POINTER_TYPE_P (base_type))
	veclen = node->simdclone->vecsize_int;
      else
	veclen = node->simdclone->vecsize_float;
      veclen /= GET_MODE_BITSIZE (TYPE_MODE (base_type));
      if (veclen > node->simdclone->simdlen)
	veclen = node->simdclone->simdlen;
      if (POINTER_TYPE_P (base_type))
	adj.type = build_vector_type (pointer_sized_int_node, veclen);
      else
	adj.type = build_vector_type (base_type, veclen);
      adjustments.safe_push (adj);

      for (j = veclen; j < node->simdclone->simdlen; j += veclen)
	adjustments.safe_push (adj);

      /* We have previously allocated one extra entry for the mask.  Use
	 it and fill it.  */
      struct cgraph_simd_clone *sc = node->simdclone;
      sc->nargs++;
      if (node->definition)
	{
	  sc->args[i].orig_arg
	    = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL, base_type);
	  sc->args[i].simd_array
	    = create_tmp_simd_array ("mask", base_type, sc->simdlen);
	}
      sc->args[i].orig_type = base_type;
      sc->args[i].arg_type = SIMD_CLONE_ARG_TYPE_MASK;
    }

  if (node->definition)
    ipa_modify_formal_parameters (node->decl, adjustments);
  else
    {
      /* Declaration only: rebuild TYPE_ARG_TYPES by hand.  */
      tree new_arg_types = NULL_TREE, new_reversed;
      bool last_parm_void = false;
      if (args.length () > 0 && args.last () == void_type_node)
	last_parm_void = true;

      gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node->decl)));
      j = adjustments.length ();
      for (i = 0; i < j; i++)
	{
	  struct ipa_parm_adjustment *adj = &adjustments[i];
	  tree ptype;
	  if (adj->op == IPA_PARM_OP_COPY)
	    ptype = args[adj->base_index];
	  else
	    ptype = adj->type;
	  new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
	}
      new_reversed = nreverse (new_arg_types);
      if (last_parm_void)
	{
	  if (new_reversed)
	    TREE_CHAIN (new_arg_types) = void_list_node;
	  else
	    new_reversed = void_list_node;
	}

      tree new_type = build_distinct_type_copy (TREE_TYPE (node->decl));
      TYPE_ARG_TYPES (new_type) = new_reversed;
      TREE_TYPE (node->decl) = new_type;

      /* NOTE(review): the vector is released here yet still returned;
	 presumably callers ignore it on the declaration-only path —
	 verify before relying on the returned value.  */
      adjustments.release ();
    }
  args.release ();
  return adjustments;
}

/* Initialize and copy the function arguments in NODE to their
   corresponding local simd arrays.  Returns a fresh gimple_seq with
   the instruction sequence generated.  */

static gimple_seq
simd_clone_init_simd_arrays (struct cgraph_node *node,
			     ipa_parm_adjustment_vec adjustments)
{
  gimple_seq seq = NULL;
  unsigned i = 0, j = 0, k;

  for (tree arg = DECL_ARGUMENTS (node->decl);
       arg;
       arg = DECL_CHAIN (arg), i++, j++)
    {
      /* Scalar (copied) arguments need no local array.  */
      if (adjustments[j].op == IPA_PARM_OP_COPY)
	continue;

      node->simdclone->args[i].vector_arg = arg;

      tree array = node->simdclone->args[i].simd_array;
      if (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg))
	  == node->simdclone->simdlen)
	{
	  /* Single vector covers the whole simdlen: one store.  */
	  tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
	  tree ptr = build_fold_addr_expr (array);
	  tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
			   build_int_cst (ptype, 0));
	  t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	  gimplify_and_add (t, &seq);
	}
      else
	{
	  /* Several vector parameters feed one array; store each at
	     its byte offset, advancing ARG (and J) as we go.  */
	  unsigned int simdlen = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg));
	  tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
	  for (k = 0; k < node->simdclone->simdlen; k += simdlen)
	    {
	      tree ptr = build_fold_addr_expr (array);
	      int elemsize;
	      if (k)
		{
		  arg = DECL_CHAIN (arg);
		  j++;
		}
	      elemsize
		= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (arg))));
	      tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
			       build_int_cst (ptype, k * elemsize));
	      t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	      gimplify_and_add (t, &seq);
	    }
	}
    }
  return seq;
}

/* Callback info for ipa_simd_modify_stmt_ops below.  */

struct modify_stmt_info {
  ipa_parm_adjustment_vec adjustments;
  gimple stmt;
  /* True if the parent statement was modified by
     ipa_simd_modify_stmt_ops.  */
  bool modified;
};

/* Callback for walk_gimple_op.  Adjust operands from a given statement
   as specified in the adjustments vector in the callback data.
*/

static tree
ipa_simd_modify_stmt_ops (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct modify_stmt_info *info = (struct modify_stmt_info *) wi->info;
  tree *orig_tp = tp;
  /* Look through an ADDR_EXPR at its operand; ORIG_TP remembers the
     outer expression so we can rebuild the address afterwards.  */
  if (TREE_CODE (*tp) == ADDR_EXPR)
    tp = &TREE_OPERAND (*tp, 0);
  struct ipa_parm_adjustment *cand = NULL;
  if (TREE_CODE (*tp) == PARM_DECL)
    cand = ipa_get_adjustment_candidate (&tp, NULL, info->adjustments,
					 true);
  else
    {
      if (TYPE_P (*tp))
	*walk_subtrees = 0;
    }

  tree repl = NULL_TREE;
  if (cand)
    repl = unshare_expr (cand->new_decl);
  else
    {
      if (tp != orig_tp)
	{
	  /* ADDR_EXPR operand that is not itself a PARM_DECL: walk it
	     recursively, shielding the outer modified flag so we can
	     tell whether anything underneath changed.  */
	  *walk_subtrees = 0;
	  bool modified = info->modified;
	  info->modified = false;
	  walk_tree (tp, ipa_simd_modify_stmt_ops, wi, wi->pset);
	  if (!info->modified)
	    {
	      info->modified = modified;
	      return NULL_TREE;
	    }
	  info->modified = modified;
	  repl = *tp;
	}
      else
	return NULL_TREE;
    }

  if (tp != orig_tp)
    {
      /* Re-materialize the address of the replacement in a new
	 statement inserted before the current one.  */
      repl = build_fold_addr_expr (repl);
      gimple stmt;
      if (is_gimple_debug (info->stmt))
	{
	  /* For debug statements bind through a DEBUG_EXPR_DECL.  */
	  tree vexpr = make_node (DEBUG_EXPR_DECL);
	  stmt = gimple_build_debug_source_bind (vexpr, repl, NULL);
	  DECL_ARTIFICIAL (vexpr) = 1;
	  TREE_TYPE (vexpr) = TREE_TYPE (repl);
	  DECL_MODE (vexpr) = TYPE_MODE (TREE_TYPE (repl));
	  repl = vexpr;
	}
      else
	{
	  stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (repl)),
				      repl);
	  repl = gimple_assign_lhs (stmt);
	}
      gimple_stmt_iterator gsi = gsi_for_stmt (info->stmt);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
      *orig_tp = repl;
    }
  else if (!useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (repl)))
    {
      /* Types differ: view-convert the replacement in place.  */
      tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*tp), repl);
      *tp = vce;
    }
  else
    *tp = repl;

  info->modified = true;
  return NULL_TREE;
}

/* Traverse the function body and perform all modifications as
   described in ADJUSTMENTS.  At function return, ADJUSTMENTS will be
   modified such that the replacement/reduction value will now be an
   offset into the corresponding simd_array.
This function will replace all function argument uses with their corresponding simd array elements, and ajust the return values accordingly. */ static void ipa_simd_modify_function_body (struct cgraph_node *node, ipa_parm_adjustment_vec adjustments, tree retval_array, tree iter) { basic_block bb; unsigned int i, j, l; /* Re-use the adjustments array, but this time use it to replace every function argument use to an offset into the corresponding simd_array. */ for (i = 0, j = 0; i < node->simdclone->nargs; ++i, ++j) { if (!node->simdclone->args[i].vector_arg) continue; tree basetype = TREE_TYPE (node->simdclone->args[i].orig_arg); tree vectype = TREE_TYPE (node->simdclone->args[i].vector_arg); adjustments[j].new_decl = build4 (ARRAY_REF, basetype, node->simdclone->args[i].simd_array, iter, NULL_TREE, NULL_TREE); if (adjustments[j].op == IPA_PARM_OP_NONE && TYPE_VECTOR_SUBPARTS (vectype) < node->simdclone->simdlen) j += node->simdclone->simdlen / TYPE_VECTOR_SUBPARTS (vectype) - 1; } l = adjustments.length (); for (i = 1; i < num_ssa_names; i++) { tree name = ssa_name (i); if (name && SSA_NAME_VAR (name) && TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL) { for (j = 0; j < l; j++) if (SSA_NAME_VAR (name) == adjustments[j].base && adjustments[j].new_decl) { tree base_var; if (adjustments[j].new_ssa_base == NULL_TREE) { base_var = copy_var_decl (adjustments[j].base, DECL_NAME (adjustments[j].base), TREE_TYPE (adjustments[j].base)); adjustments[j].new_ssa_base = base_var; } else base_var = adjustments[j].new_ssa_base; if (SSA_NAME_IS_DEFAULT_DEF (name)) { bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)); gimple_stmt_iterator gsi = gsi_after_labels (bb); tree new_decl = unshare_expr (adjustments[j].new_decl); set_ssa_default_def (cfun, adjustments[j].base, NULL_TREE); SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var); SSA_NAME_IS_DEFAULT_DEF (name) = 0; gimple stmt = gimple_build_assign (name, new_decl); gsi_insert_before (&gsi, stmt, GSI_SAME_STMT); } else 
SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var); } } } struct modify_stmt_info info; info.adjustments = adjustments; FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl)) { gimple_stmt_iterator gsi; gsi = gsi_start_bb (bb); while (!gsi_end_p (gsi)) { gimple stmt = gsi_stmt (gsi); info.stmt = stmt; struct walk_stmt_info wi; memset (&wi, 0, sizeof (wi)); info.modified = false; wi.info = &info; walk_gimple_op (stmt, ipa_simd_modify_stmt_ops, &wi); if (greturn *return_stmt = dyn_cast <greturn *> (stmt)) { tree retval = gimple_return_retval (return_stmt); if (!retval) { gsi_remove (&gsi, true); continue; } /* Replace `return foo' with `retval_array[iter] = foo'. */ tree ref = build4 (ARRAY_REF, TREE_TYPE (retval), retval_array, iter, NULL, NULL); stmt = gimple_build_assign (ref, retval); gsi_replace (&gsi, stmt, true); info.modified = true; } if (info.modified) { update_stmt (stmt); if (maybe_clean_eh_stmt (stmt)) gimple_purge_dead_eh_edges (gimple_bb (stmt)); } gsi_next (&gsi); } } } /* Adjust the argument types in NODE to their appropriate vector counterparts. */ static void simd_clone_adjust (struct cgraph_node *node) { push_cfun (DECL_STRUCT_FUNCTION (node->decl)); targetm.simd_clone.adjust (node); tree retval = simd_clone_adjust_return_type (node); ipa_parm_adjustment_vec adjustments = simd_clone_adjust_argument_types (node); push_gimplify_context (); gimple_seq seq = simd_clone_init_simd_arrays (node, adjustments); /* Adjust all uses of vector arguments accordingly. Adjust all return values accordingly. */ tree iter = create_tmp_var (unsigned_type_node, "iter"); tree iter1 = make_ssa_name (iter); tree iter2 = make_ssa_name (iter); ipa_simd_modify_function_body (node, adjustments, retval, iter1); /* Initialize the iteration variable. 
*/ basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)); basic_block body_bb = split_block_after_labels (entry_bb)->dest; gimple_stmt_iterator gsi = gsi_after_labels (entry_bb); /* Insert the SIMD array and iv initialization at function entry. */ gsi_insert_seq_before (&gsi, seq, GSI_NEW_STMT); pop_gimplify_context (NULL); /* Create a new BB right before the original exit BB, to hold the iteration increment and the condition/branch. */ basic_block orig_exit = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0)->src; basic_block incr_bb = create_empty_bb (orig_exit); add_bb_to_loop (incr_bb, body_bb->loop_father); /* The succ of orig_exit was EXIT_BLOCK_PTR_FOR_FN (cfun), with an empty flag. Set it now to be a FALLTHRU_EDGE. */ gcc_assert (EDGE_COUNT (orig_exit->succs) == 1); EDGE_SUCC (orig_exit, 0)->flags |= EDGE_FALLTHRU; for (unsigned i = 0; i < EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds); ++i) { edge e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), i); redirect_edge_succ (e, incr_bb); } edge e = make_edge (incr_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0); e->probability = REG_BR_PROB_BASE; gsi = gsi_last_bb (incr_bb); gimple g = gimple_build_assign (iter2, PLUS_EXPR, iter1, build_int_cst (unsigned_type_node, 1)); gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING); /* Mostly annotate the loop for the vectorizer (the rest is done below). */ struct loop *loop = alloc_loop (); cfun->has_force_vectorize_loops = true; loop->safelen = node->simdclone->simdlen; loop->force_vectorize = true; loop->header = body_bb; /* Branch around the body if the mask applies. 
*/ if (node->simdclone->inbranch) { gimple_stmt_iterator gsi = gsi_last_bb (loop->header); tree mask_array = node->simdclone->args[node->simdclone->nargs - 1].simd_array; tree mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array))); tree aref = build4 (ARRAY_REF, TREE_TYPE (TREE_TYPE (mask_array)), mask_array, iter1, NULL, NULL); g = gimple_build_assign (mask, aref); gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING); int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (aref))); if (!INTEGRAL_TYPE_P (TREE_TYPE (aref))) { aref = build1 (VIEW_CONVERT_EXPR, build_nonstandard_integer_type (bitsize, 0), mask); mask = make_ssa_name (TREE_TYPE (aref)); g = gimple_build_assign (mask, aref); gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING); } g = gimple_build_cond (EQ_EXPR, mask, build_zero_cst (TREE_TYPE (mask)), NULL, NULL); gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING); make_edge (loop->header, incr_bb, EDGE_TRUE_VALUE); FALLTHRU_EDGE (loop->header)->flags = EDGE_FALSE_VALUE; } /* Generate the condition. */ g = gimple_build_cond (LT_EXPR, iter2, build_int_cst (unsigned_type_node, node->simdclone->simdlen), NULL, NULL); gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING); e = split_block (incr_bb, gsi_stmt (gsi)); basic_block latch_bb = e->dest; basic_block new_exit_bb; new_exit_bb = split_block (latch_bb, NULL)->dest; loop->latch = latch_bb; redirect_edge_succ (FALLTHRU_EDGE (latch_bb), body_bb); make_edge (incr_bb, new_exit_bb, EDGE_FALSE_VALUE); /* The successor of incr_bb is already pointing to latch_bb; just change the flags. make_edge (incr_bb, latch_bb, EDGE_TRUE_VALUE); */ FALLTHRU_EDGE (incr_bb)->flags = EDGE_TRUE_VALUE; gphi *phi = create_phi_node (iter1, body_bb); edge preheader_edge = find_edge (entry_bb, body_bb); edge latch_edge = single_succ_edge (latch_bb); add_phi_arg (phi, build_zero_cst (unsigned_type_node), preheader_edge, UNKNOWN_LOCATION); add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION); /* Generate the new return. 
*/ gsi = gsi_last_bb (new_exit_bb); if (retval && TREE_CODE (retval) == VIEW_CONVERT_EXPR && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL) retval = TREE_OPERAND (retval, 0); else if (retval) { retval = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (TREE_TYPE (node->decl)), retval); retval = force_gimple_operand_gsi (&gsi, retval, true, NULL, false, GSI_CONTINUE_LINKING); } g = gimple_build_return (retval); gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING); /* Handle aligned clauses by replacing default defs of the aligned uniform args with __builtin_assume_aligned (arg_N(D), alignment) lhs. Handle linear by adding PHIs. */ for (unsigned i = 0; i < node->simdclone->nargs; i++) if (node->simdclone->args[i].alignment && node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM && (node->simdclone->args[i].alignment & (node->simdclone->args[i].alignment - 1)) == 0 && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg)) == POINTER_TYPE) { unsigned int alignment = node->simdclone->args[i].alignment; tree orig_arg = node->simdclone->args[i].orig_arg; tree def = ssa_default_def (cfun, orig_arg); if (def && !has_zero_uses (def)) { tree fn = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED); gimple_seq seq = NULL; bool need_cvt = false; gcall *call = gimple_build_call (fn, 2, def, size_int (alignment)); g = call; if (!useless_type_conversion_p (TREE_TYPE (orig_arg), ptr_type_node)) need_cvt = true; tree t = make_ssa_name (need_cvt ? 
ptr_type_node : orig_arg); gimple_call_set_lhs (g, t); gimple_seq_add_stmt_without_update (&seq, g); if (need_cvt) { t = make_ssa_name (orig_arg); g = gimple_build_assign (t, NOP_EXPR, gimple_call_lhs (g)); gimple_seq_add_stmt_without_update (&seq, g); } gsi_insert_seq_on_edge_immediate (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq); entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)); int freq = compute_call_stmt_bb_frequency (current_function_decl, entry_bb); node->create_edge (cgraph_node::get_create (fn), call, entry_bb->count, freq); imm_use_iterator iter; use_operand_p use_p; gimple use_stmt; tree repl = gimple_get_lhs (g); FOR_EACH_IMM_USE_STMT (use_stmt, iter, def) if (is_gimple_debug (use_stmt) || use_stmt == call) continue; else FOR_EACH_IMM_USE_ON_STMT (use_p, iter) SET_USE (use_p, repl); } } else if (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP) { tree orig_arg = node->simdclone->args[i].orig_arg; tree def = ssa_default_def (cfun, orig_arg); gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg)) || POINTER_TYPE_P (TREE_TYPE (orig_arg))); if (def && !has_zero_uses (def)) { iter1 = make_ssa_name (orig_arg); iter2 = make_ssa_name (orig_arg); phi = create_phi_node (iter1, body_bb); add_phi_arg (phi, def, preheader_edge, UNKNOWN_LOCATION); add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION); enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg)) ? PLUS_EXPR : POINTER_PLUS_EXPR; tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg)) ? 
TREE_TYPE (orig_arg) : sizetype;
	    tree addcst = build_int_cst (addtype,
					 node->simdclone->args[i].linear_step);
	    /* iter2 = iter1 + linear_step; emitted into the increment
	       block so the linear argument advances once per vector
	       lane iteration.  */
	    g = gimple_build_assign (iter2, code, iter1, addcst);
	    gsi = gsi_last_bb (incr_bb);
	    gsi_insert_before (&gsi, g, GSI_SAME_STMT);

	    imm_use_iterator iter;
	    use_operand_p use_p;
	    gimple use_stmt;
	    /* Redirect every use of the argument's default def (except
	       the PHI we just built) to the per-iteration PHI result.  */
	    FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
	      if (use_stmt == phi)
		continue;
	      else
		FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		  SET_USE (use_p, iter1);
	  }
      }

  /* The CFG and SSA form were modified above: recompute dominators,
     register the new simd loop and bring SSA up to date before
     leaving the clone's function context.  */
  calculate_dominance_info (CDI_DOMINATORS);
  add_loop (loop, loop->header->loop_father);
  update_ssa (TODO_update_ssa);
  pop_cfun ();
}

/* If the function in NODE is tagged as an elemental SIMD function,
   create the appropriate SIMD clones.  */

static void
expand_simd_clones (struct cgraph_node *node)
{
  tree attr = lookup_attribute ("omp declare simd",
				DECL_ATTRIBUTES (node->decl));
  /* Nothing to do without the attribute, for bodies inlined elsewhere,
     or when cloning is explicitly disabled.  */
  if (attr == NULL_TREE
      || node->global.inlined_to
      || lookup_attribute ("noclone", DECL_ATTRIBUTES (node->decl)))
    return;

  /* Ignore
     #pragma omp declare simd
     extern int foo ();
     in C, there we don't know the argument types at all.  */
  if (!node->definition
      && TYPE_ARG_TYPES (TREE_TYPE (node->decl)) == NULL_TREE)
    return;

  /* One iteration per "omp declare simd" attribute on the decl.  */
  do
    {
      /* Start with parsing the "omp declare simd" attribute(s).  */
      bool inbranch_clause_specified;
      struct cgraph_simd_clone *clone_info
	= simd_clone_clauses_extract (node, TREE_VALUE (attr),
				      &inbranch_clause_specified);
      if (clone_info == NULL)
	continue;

      int orig_simdlen = clone_info->simdlen;
      tree base_type = simd_clone_compute_base_data_type (node, clone_info);
      /* The target can return 0 (no simd clones should be created),
	 1 (just one ISA of simd clones should be created) or higher
	 count of ISA variants.  In that case, clone_info is initialized
	 for the first ISA variant.  */
      int count
	= targetm.simd_clone.compute_vecsize_and_simdlen (node, clone_info,
							  base_type, 0);
      if (count == 0)
	continue;

      /* Loop over all COUNT ISA variants, and if !INBRANCH_CLAUSE_SPECIFIED,
	 also create one inbranch and one !inbranch clone of it.
	 Even i => notinbranch variant, odd i => inbranch variant.  */
      for (int i = 0; i < count * 2; i++)
	{
	  struct cgraph_simd_clone *clone = clone_info;
	  if (inbranch_clause_specified && (i & 1) != 0)
	    continue;

	  if (i != 0)
	    {
	      /* Inbranch clones get one extra arg slot for the mask.  */
	      clone = simd_clone_struct_alloc (clone_info->nargs
					       + ((i & 1) != 0));
	      simd_clone_struct_copy (clone, clone_info);
	      /* Undo changes targetm.simd_clone.compute_vecsize_and_simdlen
		 and simd_clone_adjust_argument_types did to the first
		 clone's info.  */
	      clone->nargs -= clone_info->inbranch;
	      clone->simdlen = orig_simdlen;
	      /* And call the target hook again to get the right ISA.  */
	      targetm.simd_clone.compute_vecsize_and_simdlen (node, clone,
							      base_type,
							      i / 2);
	      if ((i & 1) != 0)
		clone->inbranch = 1;
	    }

	  /* simd_clone_mangle might fail if such a clone has been created
	     already.  */
	  tree id = simd_clone_mangle (node, clone);
	  if (id == NULL_TREE)
	    continue;

	  /* Only when we are sure we want to create the clone actually
	     clone the function (or definitions) or create another
	     extern FUNCTION_DECL (for prototypes without definitions).  */
	  struct cgraph_node *n = simd_clone_create (node);
	  if (n == NULL)
	    continue;

	  n->simdclone = clone;
	  clone->origin = node;
	  clone->next_clone = NULL;
	  /* Clones hang off node->simd_clones on a singly linked list
	     whose first element's prev_clone points at the last one.  */
	  if (node->simd_clones == NULL)
	    {
	      clone->prev_clone = n;
	      node->simd_clones = n;
	    }
	  else
	    {
	      clone->prev_clone = node->simd_clones->simdclone->prev_clone;
	      clone->prev_clone->simdclone->next_clone = n;
	      node->simd_clones->simdclone->prev_clone = n;
	    }
	  symtab->change_decl_assembler_name (n->decl, id);
	  /* And finally adjust the return type, parameters and for
	     definitions also function body.  */
	  if (node->definition)
	    simd_clone_adjust (n);
	  else
	    {
	      simd_clone_adjust_return_type (n);
	      simd_clone_adjust_argument_types (n);
	    }
	}
    }
  while ((attr = lookup_attribute ("omp declare simd", TREE_CHAIN (attr))));
}

/* Entry point for IPA simd clone creation pass.
*/

/* IPA pass driver: walk every function in the call graph and expand its
   "omp declare simd" clones, if any.  */

static unsigned int
ipa_omp_simd_clone (void)
{
  struct cgraph_node *node;
  FOR_EACH_FUNCTION (node)
    expand_simd_clones (node);
  return 0;
}

namespace {

const pass_data pass_data_omp_simd_clone =
{
  SIMPLE_IPA_PASS,		/* type */
  "simdclone",			/* name */
  OPTGROUP_NONE,		/* optinfo_flags */
  TV_NONE,			/* tv_id */
  ( PROP_ssa | PROP_cfg ),	/* properties_required */
  0,				/* properties_provided */
  0,				/* properties_destroyed */
  0,				/* todo_flags_start */
  0,				/* todo_flags_finish */
};

class pass_omp_simd_clone : public simple_ipa_opt_pass
{
public:
  pass_omp_simd_clone(gcc::context *ctxt)
    : simple_ipa_opt_pass(pass_data_omp_simd_clone, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return ipa_omp_simd_clone (); }
};

/* Run only when some SIMD-capable frontend flag is active (or during
   LTO compilation proper) and the target implements the simd clone
   hook.  */

bool
pass_omp_simd_clone::gate (function *)
{
  return ((flag_openmp || flag_openmp_simd
	   || flag_cilkplus
	   || (in_lto_p && !flag_wpa))
	  && (targetm.simd_clone.compute_vecsize_and_simdlen != NULL));
}

} // anon namespace

simple_ipa_opt_pass *
make_pass_omp_simd_clone (gcc::context *ctxt)
{
  return new pass_omp_simd_clone (ctxt);
}

/* Helper function for omp_finish_file routine.  Takes decls from V_DECLS and
   adds their addresses and sizes to constructor-vector V_CTOR.  */
static void
add_decls_addresses_to_decl_constructor (vec<tree, va_gc> *v_decls,
					 vec<constructor_elt, va_gc> *v_ctor)
{
  unsigned len = vec_safe_length (v_decls);
  for (unsigned i = 0; i < len; i++)
    {
      tree it = (*v_decls)[i];
      bool is_function = TREE_CODE (it) != VAR_DECL;

      /* Functions contribute only their address; variables contribute an
	 (address, size) pair.  */
      CONSTRUCTOR_APPEND_ELT (v_ctor, NULL_TREE, build_fold_addr_expr (it));
      if (!is_function)
	CONSTRUCTOR_APPEND_ELT (v_ctor, NULL_TREE,
				fold_convert (const_ptr_type_node,
					      DECL_SIZE_UNIT (it)));
    }
}

/* Create new symbols containing (address, size) pairs for global variables,
   marked with "omp declare target" attribute, as well as addresses for the
   functions, which are outlined offloading regions.
*/
void
omp_finish_file (void)
{
  unsigned num_funcs = vec_safe_length (offload_funcs);
  unsigned num_vars = vec_safe_length (offload_vars);

  /* Nothing was marked for offloading in this translation unit.  */
  if (num_funcs == 0 && num_vars == 0)
    return;

  if (targetm_common.have_named_sections)
    {
      /* Emit the tables as initialized arrays placed in dedicated
	 sections, so the linker can concatenate the per-object tables.  */
      vec<constructor_elt, va_gc> *v_f, *v_v;
      vec_alloc (v_f, num_funcs);
      vec_alloc (v_v, num_vars * 2);

      add_decls_addresses_to_decl_constructor (offload_funcs, v_f);
      add_decls_addresses_to_decl_constructor (offload_vars, v_v);

      /* Each variable occupies two slots (address, size); each function
	 occupies one (address only).  */
      tree vars_decl_type = build_array_type_nelts (pointer_sized_int_node,
						    num_vars * 2);
      tree funcs_decl_type = build_array_type_nelts (pointer_sized_int_node,
						     num_funcs);
      TYPE_ALIGN (vars_decl_type) = TYPE_ALIGN (pointer_sized_int_node);
      TYPE_ALIGN (funcs_decl_type) = TYPE_ALIGN (pointer_sized_int_node);
      tree ctor_v = build_constructor (vars_decl_type, v_v);
      tree ctor_f = build_constructor (funcs_decl_type, v_f);
      TREE_CONSTANT (ctor_v) = TREE_CONSTANT (ctor_f) = 1;
      TREE_STATIC (ctor_v) = TREE_STATIC (ctor_f) = 1;
      tree funcs_decl = build_decl (UNKNOWN_LOCATION, VAR_DECL,
				    get_identifier (".offload_func_table"),
				    funcs_decl_type);
      tree vars_decl = build_decl (UNKNOWN_LOCATION, VAR_DECL,
				   get_identifier (".offload_var_table"),
				   vars_decl_type);
      TREE_STATIC (funcs_decl) = TREE_STATIC (vars_decl) = 1;
      /* Do not align tables more than TYPE_ALIGN (pointer_sized_int_node),
	 otherwise a joint table in a binary will contain padding between
	 tables from multiple object files.  */
      DECL_USER_ALIGN (funcs_decl) = DECL_USER_ALIGN (vars_decl) = 1;
      DECL_ALIGN (funcs_decl) = TYPE_ALIGN (funcs_decl_type);
      DECL_ALIGN (vars_decl) = TYPE_ALIGN (vars_decl_type);
      DECL_INITIAL (funcs_decl) = ctor_f;
      DECL_INITIAL (vars_decl) = ctor_v;
      set_decl_section_name (funcs_decl, OFFLOAD_FUNC_TABLE_SECTION_NAME);
      set_decl_section_name (vars_decl, OFFLOAD_VAR_TABLE_SECTION_NAME);

      varpool_node::finalize_decl (vars_decl);
      varpool_node::finalize_decl (funcs_decl);
    }
  else
    {
      /* Targets without named-section support record each offload symbol
	 through a target hook instead.  */
      for (unsigned i = 0; i < num_funcs; i++)
	{
	  tree it = (*offload_funcs)[i];
	  targetm.record_offload_symbol (it);
	}
      for (unsigned i = 0; i < num_vars; i++)
	{
	  tree it = (*offload_vars)[i];
	  targetm.record_offload_symbol (it);
	}
    }
}

#include "gt-omp-low.h"
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 8; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4)); ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-3,4)),ceild(4*t2-Nz-4,8));t3<=min(min(min(floord(4*t2+Ny,8),floord(Nt+Ny-4,8)),floord(2*t1+Ny+1,8)),floord(4*t1-4*t2+Nz+Ny-1,8));t3++) { for (t4=max(max(max(0,ceild(t1-31,32)),ceild(4*t2-Nz-60,64)),ceild(8*t3-Ny-60,64));t4<=min(min(min(min(floord(4*t2+Nx,64),floord(Nt+Nx-4,64)),floord(2*t1+Nx+1,64)),floord(8*t3+Nx+4,64)),floord(4*t1-4*t2+Nz+Nx-1,64));t4++) { for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),8*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),8*t3+6),64*t4+62),4*t1-4*t2+Nz+1);t5++) { for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(64*t4,t5+1); ubv=min(64*t4+63,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } } } /* End of CLooG code */ 
gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
GB_AxB_saxpy3_cumsum.c
//------------------------------------------------------------------------------
// GB_AxB_saxpy3_cumsum: finalize nnz(C(:,j)) and find cumulative sum of Cp
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// phase3: fine tasks finalize their computation nnz(C(:,j))
// phase4: cumulative sum of C->p

#include "GB_AxB_saxpy3.h"

int64_t GB_AxB_saxpy3_cumsum    // return cjnz_max for fine tasks
(
    GrB_Matrix C,               // finalize C->p
    GB_saxpy3task_struct *TaskList, // list of tasks, and workspace
    int nfine,                  // number of fine tasks
    double chunk,               // chunk size
    int nthreads                // number of threads
)
{

    //--------------------------------------------------------------------------
    // get C
    //--------------------------------------------------------------------------

    int64_t *GB_RESTRICT Cp = C->p ;
    const int64_t cvlen = C->vlen ;
    const int64_t cnvec = C->nvec ;

    //==========================================================================
    // phase3: count nnz(C(:,j)) for fine tasks
    //==========================================================================

    // Each fine task scans its own partition of the shared workspace Hf and
    // counts the entries it contributed; tasks are independent here.

    int taskid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (taskid = 0 ; taskid < nfine ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        // int64_t kk = TaskList [taskid].vector ;
        int64_t hash_size = TaskList [taskid].hsize ;
        // a hash table the full size of the vector means Gustavson's method
        bool use_Gustavson = (hash_size == cvlen) ;
        int team_size = TaskList [taskid].team_size ;
        int master = TaskList [taskid].master ;
        int my_teamid = taskid - master ;
        int64_t my_cjnz = 0 ;

        if (use_Gustavson)
        {

            //------------------------------------------------------------------
            // phase3: fine Gustavson task, C=A*B, C<M>=A*B, or C<!M>=A*B
            //------------------------------------------------------------------

            // Hf [i] == 2 if C(i,j) is an entry in C(:,j)

            uint8_t *GB_RESTRICT Hf = TaskList [taskid].Hf ;
            int64_t istart, iend ;
            GB_PARTITION (istart, iend, cvlen, my_teamid, team_size) ;
            for (int64_t i = istart ; i < iend ; i++)
            {
                if (Hf [i] == 2)
                {
                    my_cjnz++ ;
                }
            }

        }
        else
        {

            //------------------------------------------------------------------
            // phase3: fine hash task, C=A*B, C<M>=A*B, or C<!M>=A*B
            //------------------------------------------------------------------

            // (Hf [hash] & 3) == 2 if C(i,j) is an entry in C(:,j),
            // and the index i of the entry is (Hf [hash] >> 2) - 1.

            int64_t *GB_RESTRICT Hf = TaskList [taskid].Hf ;
            int64_t mystart, myend ;
            GB_PARTITION (mystart, myend, hash_size, my_teamid, team_size) ;
            for (int64_t hash = mystart ; hash < myend ; hash++)
            {
                if ((Hf [hash] & 3) == 2)
                {
                    my_cjnz++ ;
                }
            }
        }

        TaskList [taskid].my_cjnz = my_cjnz ;   // count my nnz(C(:,j))
    }

    //==========================================================================
    // phase4: compute Cp with cumulative sum
    //==========================================================================

    // TaskList [taskid].my_cjnz is the # of unique entries found in C(:,j) by
    // that task.  Sum these terms to compute total # of entries in C(:,j).

    // These two passes are sequential over tasks: several fine tasks may
    // target the same vector kk, so Cp [kk] is reset once and then
    // accumulated without atomics.

    for (taskid = 0 ; taskid < nfine ; taskid++)
    {
        int64_t kk = TaskList [taskid].vector ;
        Cp [kk] = 0 ;
    }

    for (taskid = 0 ; taskid < nfine ; taskid++)
    {
        int64_t kk = TaskList [taskid].vector ;
        int64_t my_cjnz = TaskList [taskid].my_cjnz ;
        Cp [kk] += my_cjnz ;
        ASSERT (my_cjnz <= cvlen) ;
    }

    // Cp [kk] is now nnz (C (:,j)), for all vectors j, whether computed by
    // fine tasks or coarse tasks, and where j == (Bh == NULL) ? kk : Bh [kk].

    int nth = GB_nthreads (cnvec, chunk, nthreads) ;
    GB_cumsum (Cp, cnvec, &(C->nvec_nonempty), nth) ;

    // cumulative sum of nnz (C (:,j)) for each team of fine tasks
    // (turns each task's count into its exclusive offset within the team,
    // restarting at every team master)
    int64_t cjnz_sum = 0 ;
    int64_t cjnz_max = 0 ;
    for (taskid = 0 ; taskid < nfine ; taskid++)
    {
        if (taskid == TaskList [taskid].master)
        {
            cjnz_sum = 0 ;
            // also find the max (C (:,j)) for any fine hash tasks
            int64_t hash_size = TaskList [taskid].hsize ;
            bool use_Gustavson = (hash_size == cvlen) ;
            if (!use_Gustavson)
            {
                int64_t kk = TaskList [taskid].vector ;
                int64_t cjnz = Cp [kk+1] - Cp [kk] ;
                cjnz_max = GB_IMAX (cjnz_max, cjnz) ;
            }
        }
        int64_t my_cjnz = TaskList [taskid].my_cjnz ;
        TaskList [taskid].my_cjnz = cjnz_sum ;
        cjnz_sum += my_cjnz ;
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    return (cjnz_max) ;
}
oldoffice_fmt_plug.c
/* MS Office 97-2003 cracker patch for JtR. Hacked together during May of
 * 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
 *
 * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
 * Copyright (c) 2014, magnum
 * Copyright (c) 2009, David Leblanc (http://offcrypto.codeplex.com/)
 *
 * License: Microsoft Public License (MS-PL)
 *
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_oldoffice;
#elif FMT_REGISTERS_H
john_register_one(&fmt_oldoffice);
#else

#include "md5.h"
#include "rc4.h"
#include <string.h>
#include "stdint.h"
#include <assert.h>
#include "sha.h"
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "unicode.h"
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 64
#endif
#include "memdbg.h"

#define FORMAT_LABEL "oldoffice"
#define FORMAT_NAME "MS Office <= 2003"
#define ALGORITHM_NAME "MD5/SHA1 RC4 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1000
#define PLAINTEXT_LENGTH 64
#define BINARY_SIZE 0
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1

#define CIPHERTEXT_LENGTH (TAG_LEN + 120)
#define FORMAT_TAG "$oldoffice$"
#define TAG_LEN (sizeof(FORMAT_TAG) - 1)

/* Self-test vectors: "$oldoffice$type*salt*verifier*verifierHash[*mitm]".
 * Types 0/1 take the MD5/RC4 path, 3/4 the SHA-1/RC4 path (see crypt_all). */
static struct fmt_tests oo_tests[] = {
	{"$oldoffice$1*de17a7f3c3ff03a39937ba9666d6e952*2374d5b6ce7449f57c9f252f9f9b53d2*e60e1185f7aecedba262f869c0236f81", "test"},
	{"$oldoffice$0*e40b4fdade5be6be329c4238e2099b8a*259590322b55f7a3c38cb96b5864e72d*2e6516bfaf981770fe6819a34998295d", "123456789012345"},
	/* 2003-RC4-40bit-MS-Base-Crypto-1.0_myhovercraftisfullofeels_.doc */
	{"$oldoffice$3*9f32522fe9bcb69b12f39d3c24b39b2f*fac8b91a8a578468ae7001df4947558f*f2e267a5bea45736b52d6d1051eca1b935eabf3a", "myhovercraftisfullofeels"},
	/* Test-RC4-40bit-MS-Base-DSS_myhovercraftisfullofeels_.doc */
	{"$oldoffice$3*095b777a73a10fb6bcd3e48d50f8f8c5*36902daab0d0f38f587a84b24bd40dce*25db453f79e8cbe4da1844822b88f6ce18a5edd2", "myhovercraftisfullofeels"},
	/* 2003-RC4-40bit-MS-Base-DH-SChan_myhovercraftisfullofeels_.doc */
	{"$oldoffice$3*284bc91cb64bc847a7a44bc7bf34fb69*1f8c589c6fcbd43c42b2bc6fff4fd12b*2bc7d8e866c9ea40526d3c0a59e2d37d8ded3550", "myhovercraftisfullofeels"},
	/* Test-RC4-128bit-MS-Strong-Crypto_myhovercraftisfullofeels_.doc */
	{"$oldoffice$4*a58b39c30a06832ee664c1db48d17304*986a45cc9e17e062f05ceec37ec0db17*fe0c130ef374088f3fec1979aed4d67459a6eb9a", "myhovercraftisfullofeels"},
	/* the following hash was extracted from Proc2356.ppt (manually + by oldoffice2john.py */
	{"$oldoffice$3*DB575DDA2E450AB3DFDF77A2E9B3D4C7*AB183C4C8B5E5DD7B9F3AF8AE5FFF31A*B63594447FAE7D4945D2DAFD113FD8C9F6191BF5", "crypto"},
	{"$oldoffice$3*3fbf56a18b026e25815cbea85a16036c*216562ea03b4165b54cfaabe89d36596*91308b40297b7ce31af2e8c57c6407994b205590", "openwall"},
	/* 2003-RC4-40bit-MS-Base-1.0_myhovercraftisfullofeels_.xls */
	{"$oldoffice$3*f426041b2eba9745d30c7949801f7d3a*888b34927e5f31e2703cc4ce86a6fd78*ff66200812fd06c1ba43ec2be9f3390addb20096", "myhovercraftisfullofeels"},
	/* Meet-in-the-middle candidate produced with oclHashcat -m9710 */
	/* Real pw is "hashcat", one collision is "zvDtu!" */
	{"$oldoffice$1*d6aabb63363188b9b73a88efb9c9152e*afbbb9254764273f8f4fad9a5d82981f*6f09fd2eafc4ade522b5f2bee0eaf66d*f2ab1219ae", "zvDtu!"},
	{NULL}
};

/* Password encoded in UCS-2 */
static UTF16 (*saved_key)[PLAINTEXT_LENGTH + 1];
/* UCS-2 password length, in octets */
static int *saved_len;
/* per-candidate crack flags; any_cracked is the "anything hit" summary */
static int any_cracked, *cracked;
static size_t cracked_size;

static struct custom_salt {
	int type;                        /* scheme selector parsed from the hash */
	unsigned char salt[16];
	unsigned char verifier[16]; /* or encryptedVerifier */
	unsigned char verifierHash[20];  /* or encryptedVerifierHash */
	unsigned int has_mitm;
	unsigned char mitm[5]; /* Meet-in-the-middle hint, if we have one */
} *cur_salt;

static struct custom_salt cs;

/* Format init hook: scale key buffers for OpenMP and allocate
 * the shared key/length/cracked arrays. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = 1;

	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	/* UTF-8 input may need up to 3 bytes per UCS-2 code unit */
	if (pers_opts.target_enc == UTF_8)
		self->params.plaintext_length = 3 * PLAINTEXT_LENGTH > 125 ?
			125 : 3 * PLAINTEXT_LENGTH;
	saved_key = mem_calloc_tiny(sizeof(*saved_key) *
			self->params.max_keys_per_crypt, sizeof(UTF16));
	saved_len = mem_calloc_tiny(sizeof(*saved_len) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	any_cracked = 0;
	cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;
	cracked = mem_calloc_tiny(cracked_size, MEM_ALIGN_WORD);
	cur_salt = &cs;
}

/* Return nonzero iff q consists solely of hex digits (per atoi16). */
static int ishex(char *q)
{
	while (atoi16[ARCH_INDEX(*q)] != 0x7F)
		q++;
	return !*q;
}

/* Validate the "$oldoffice$type*salt*verifier*verifierHash" layout.
 * NOTE(review): atoi() here accepts negative/garbage as 0 and only the
 * upper bound (> 4) is rejected -- confirm whether stricter parsing is
 * wanted upstream. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *ptr, *keeptr;
	int res;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LEN))
		return 0;
	if (strlen(ciphertext) > CIPHERTEXT_LENGTH)
		return 0;
	if (!(ctcopy = strdup(ciphertext)))
		return 0;
	keeptr = ctcopy;
	ctcopy += TAG_LEN;
	if (!(ptr = strtok(ctcopy, "*"))) /* type */
		goto error;
	res = atoi(ptr);
	if (res > 4)
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* salt */
		goto error;
	if (strlen(ptr) != 32)
		goto error;
	if (!ishex(ptr))
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /*
verifier */
		goto error;
	if (strlen(ptr) != 32)
		goto error;
	if (!ishex(ptr))
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* verifier hash */
		goto error;
	/* 32 hex chars for the MD5 schemes, 40 for the SHA-1 schemes */
	if (strlen(ptr) != 32 && strlen(ptr) != 40)
		goto error;
	if (!ishex(ptr))
		goto error;
	MEM_FREE(keeptr);
	return 1;
error:
	MEM_FREE(keeptr);
	return 0;
}

/* Canonicalize a hash for storage: lower-case the whole string
 * (the tag is already lower-case, hex digits compare caselessly). */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[CIPHERTEXT_LENGTH];

	strnzcpy(out, ciphertext, sizeof(out));
	strlwr(out);
	return out;
}

/* Parse a validated ciphertext into the static custom_salt (cs).
 * Returns a pointer to that static buffer, as JtR expects. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;

	memset(&cs, 0, sizeof(cs));
	ctcopy += TAG_LEN;	/* skip over "$oldoffice$" */
	p = strtok(ctcopy, "*");
	cs.type = atoi(p);
	p = strtok(NULL, "*");
	for (i = 0; i < 16; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtok(NULL, "*");
	for (i = 0; i < 16; i++)
		cs.verifier[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtok(NULL, "*");
	/* MD5 schemes store a 16-byte hash, SHA-1 schemes a 20-byte one */
	if (cs.type < 3) {
		for (i = 0; i < 16; i++)
			cs.verifierHash[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	}
	else {
		for (i = 0; i < 20; i++)
			cs.verifierHash[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	}
	/* optional fifth field: 5-byte meet-in-the-middle hint */
	if ((p = strtok(NULL, "*"))) {
		cs.has_mitm = 1;
		for (i = 0; i < 5; i++)
			cs.mitm[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	} else
		cs.has_mitm = 0;
	MEM_FREE(keeptr);
	return (void *)&cs;
}

#if 0
/* Disabled inverse of get_salt(): re-serializes cur_salt back into
 * canonical "$oldoffice$..." text. */
static char *source(char *source, void *binary)
{
	static char Buf[CIPHERTEXT_LENGTH];
	unsigned char *cpi, *cp = (unsigned char*)Buf;
	int i, len;

	cp += sprintf(Buf, "%s%d*", FORMAT_TAG, cur_salt->type);

	cpi = cur_salt->salt;
	for (i = 0; i < 16; i++) {
		*cp++ = itoa16[*cpi >> 4];
		*cp++ = itoa16[*cpi & 0xf];
		cpi++;
	}
	*cp++ = '*';

	cpi = cur_salt->verifier;
	for (i = 0; i < 16; i++) {
		*cp++ = itoa16[*cpi >> 4];
		*cp++ = itoa16[*cpi & 0xf];
		cpi++;
	}
	*cp++ = '*';

	len = (cur_salt->type < 3) ? 16 : 20;
	cpi = cur_salt->verifierHash;
	for (i = 0; i < len; i++) {
		*cp++ = itoa16[*cpi >> 4];
		*cp++ = itoa16[*cpi & 0xf];
		cpi++;
	}

	if (cur_salt->has_mitm) {
		*cp++ = '*';
		cpi = cur_salt->mitm;
		for (i = 0; i < 5; i++) {
			*cp++ = itoa16[*cpi >> 4];
			*cp++ = itoa16[*cpi & 0xf];
			cpi++;
		}
	}
	*cp = 0;
	return Buf;
}
#endif

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Try all queued candidate passwords against cur_salt.
 * Types 0/1: MD5-based key setup; types 3/4: SHA-1-based key setup.
 * Both paths derive a 40-bit "mid key" that supports the optional
 * meet-in-the-middle early reject. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		int i;
		RC4_KEY key;

		if (cur_salt->type < 3) {
			MD5_CTX ctx;
			unsigned char mid_key[16];
			unsigned char pwdHash[16];
			unsigned char hashBuf[21 * 16];

			MD5_Init(&ctx);
			MD5_Update(&ctx, saved_key[index], saved_len[index]);
			MD5_Final(mid_key, &ctx);
			/* 16 repetitions of (first 5 bytes of pw hash || salt) */
			for (i = 0; i < 16; i++) {
				memcpy(hashBuf + i * 21, mid_key, 5);
				memcpy(hashBuf + i * 21 + 5, cur_salt->salt, 16);
			}
			MD5_Init(&ctx);
			MD5_Update(&ctx, hashBuf, 21 * 16);
			MD5_Final(mid_key, &ctx);
			// Early reject if we got a hint
			if (cur_salt->has_mitm &&
			    memcmp(mid_key, cur_salt->mitm, 5))
				continue;
			memcpy(hashBuf, mid_key, 5);
			memset(hashBuf + 5, 0, 4);
			MD5_Init(&ctx);
			MD5_Update(&ctx, hashBuf, 9);
			MD5_Final(pwdHash, &ctx);
			RC4_set_key(&key, 16, pwdHash); /* rc4Key */
			RC4(&key, 16, cur_salt->verifier, hashBuf); /* encryptedVerifier */
			RC4(&key, 16, cur_salt->verifierHash, hashBuf + 16); /* encryptedVerifierHash */
			/* hash the decrypted verifier */
			MD5_Init(&ctx);
			MD5_Update(&ctx, hashBuf, 16);
			MD5_Final(pwdHash, &ctx);
			if (!memcmp(pwdHash, hashBuf + 16, 16)) {
#ifdef _OPENMP
#pragma omp critical
#endif
				{
					any_cracked = cracked[index] = 1;
					/* remember the mid key as a future hint */
					cur_salt->has_mitm = 1;
					memcpy(cur_salt->mitm, mid_key, 5);
				}
			}
		}
		else {
			SHA_CTX ctx;
			unsigned char H0[24];
			unsigned char mid_key[20];
			unsigned char Hfinal[20];
			unsigned char DecryptedVerifier[16];
			unsigned char DecryptedVerifierHash[20];

			SHA1_Init(&ctx);
SHA1_Update(&ctx, cur_salt->salt, 16);
			SHA1_Update(&ctx, saved_key[index], saved_len[index]);
			SHA1_Final(H0, &ctx);
			/* H0 padded with 4 zero bytes (block counter 0) */
			memset(&H0[20], 0, 4);
			SHA1_Init(&ctx);
			SHA1_Update(&ctx, H0, 24);
			SHA1_Final(mid_key, &ctx);
			// Early reject if we got a hint
			if (cur_salt->has_mitm &&
			    memcmp(mid_key, cur_salt->mitm, 5))
				continue;
			/* type 3 truncates to a 40-bit key (zero-padded);
			   type 4 uses the full 128 bits */
			if (cur_salt->type < 4) {
				memcpy(Hfinal, mid_key, 5);
				memset(&Hfinal[5], 0, 11);
			} else
				memcpy(Hfinal, mid_key, 20);
			RC4_set_key(&key, 16, Hfinal); /* dek */
			RC4(&key, 16, cur_salt->verifier, DecryptedVerifier);
			RC4(&key, 20, cur_salt->verifierHash, DecryptedVerifierHash);
			SHA1_Init(&ctx);
			SHA1_Update(&ctx, DecryptedVerifier, 16);
			SHA1_Final(Hfinal, &ctx);
			if (!memcmp(Hfinal, DecryptedVerifierHash, 16)) {
#ifdef _OPENMP
#pragma omp critical
#endif
				{
					any_cracked = cracked[index] = 1;
					/* remember the mid key as a future hint */
					cur_salt->has_mitm = 1;
					memcpy(cur_salt->mitm, mid_key, 5);
				}
			}
		}
	}
	return count;
}

static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

/* crypt_all already verified the hash fully, so this is a no-op. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

static void set_key(char *key, int index)
{
	/* convert key to UTF-16LE */
	saved_len[index] = enc_to_utf16(saved_key[index], PLAINTEXT_LENGTH,
	                                (UTF8*)key, strlen(key));
	if (saved_len[index] < 0)
		saved_len[index] = strlen16(saved_key[index]);
	/* length in octets, not UTF-16 code units */
	saved_len[index] <<= 1;
}

static char *get_key(int index)
{
	return (char*)utf16_to_enc(saved_key[index]);
}

#if FMT_MAIN_VERSION > 11
/* Tunable-cost hook: report the scheme type as the "hash type" cost. */
static unsigned int oo_hash_type(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->type;
}
#endif

struct fmt_main fmt_oldoffice = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE,
#if FMT_MAIN_VERSION > 11
		{
			"hash type",
		},
#endif
		oo_tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		fmt_default_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{
			oo_hash_type,
		},
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
GB_subassign_04.c
//------------------------------------------------------------------------------
// GB_subassign_04: C(I,J) += A ; using S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Method 04: C(I,J) += A ; using S

// M:           NULL
// Mask_comp:   false
// C_replace:   false
// accum:       present
// A:           matrix
// S:           constructed

// C: not bitmap: use GB_bitmap_assign instead
// A: any sparsity structure.

#include "GB_subassign_methods.h"

// Accumulate A into the submatrix C(I,J), using the symbolic extraction
// S = C(I,J) to locate the entries of C.  Entries present in both S and A
// are updated in place via the accum operator; entries in A but not S are
// queued as pending tuples and inserted when C is finalized.  Many locals
// (taskid, ntasks, nthreads, kfirst, klast, pS, pA, ... ) are declared by
// the GB_* macros from GB_subassign_methods.h, not visible here.
GrB_Info GB_subassign_04
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,
    const int64_t ni,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,
    const int64_t nj,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_BinaryOp accum,
    const GrB_Matrix A,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_BITMAP (C)) ;
    ASSERT (!GB_aliased (C, A)) ;   // NO ALIAS of C==A

    //--------------------------------------------------------------------------
    // S = C(I,J)
    //--------------------------------------------------------------------------

    GB_EMPTY_TASKLIST ;
    GB_CLEAR_STATIC_HEADER (S, &S_header) ;
    GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    // A must be unjumbled so its entries can be merged with S in order.
    GB_MATRIX_WAIT_IF_JUMBLED (A) ;

    GB_GET_C ;      // C must not be bitmap
    GB_GET_A ;
    GB_GET_S ;
    GB_GET_ACCUM ;

    //--------------------------------------------------------------------------
    // Method 04: C(I,J) += A ; using S
    //--------------------------------------------------------------------------

    // Time: Close to Optimal.  Every entry in A must be visited, and the
    // corresponding entry in S must then be found.  Time for this phase is
    // Omega(nnz(A)), but S has already been constructed, in Omega(nnz(S))
    // time.  This method simply traverses all of A+S (like GB_add for
    // computing A+S), the same as Method 02.  Time taken is O(nnz(A)+nnz(S)).
    // The only difference is that the traversal of A+S can terminate if A is
    // exhausted.  Entries in S but not A do not actually require any work
    // (unlike Method 02, which must visit all entries in A+S).

    // Method 02 and Method 04 are somewhat similar.  They differ on how C is
    // modified when the entry is present in S but not A.

    // TODO: phase2 of Method 02 and 04 are identical and could be
    // done in a single function.

    // Compare with Method 16, which computes C(I,J)<!M> += A, using S.

    //--------------------------------------------------------------------------
    // Parallel: A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
    //--------------------------------------------------------------------------

    if (A_is_bitmap)
    {
        // all of IxJ must be examined
        GB_SUBASSIGN_IXJ_SLICE ;
    }
    else
    {
        // traverse all A+S
        GB_SUBASSIGN_TWO_SLICE (A, S) ;
    }

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    if (A_is_bitmap)
    {

        //----------------------------------------------------------------------
        // phase1: A is bitmap
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:nzombies)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t j = kfirst ; j <= klast ; j++)
            {

                //--------------------------------------------------------------
                // get S(iA_start:iA_end,j)
                //--------------------------------------------------------------

                GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;

                // A is bitmap: entry A(iA,j) lives at position j*Avlen + iA.
                int64_t pA_start = j * Avlen ;

                //--------------------------------------------------------------
                // do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j)
                //--------------------------------------------------------------

                for (int64_t iA = iA_start ; iA < iA_end ; iA++)
                {
                    int64_t pA = pA_start + iA ;
                    bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
                    bool Afound = Ab [pA] ;
                    if (Sfound && !Afound)
                    {
                        // ----[C . 1] or [X . 1]-------------------------------
                        // S (i,j) is present but A (i,j) is not
                        // [C . 1]: action: ( C ): no change, with accum
                        // [X . 1]: action: ( X ): still a zombie
                        GB_NEXT (S) ;
                    }
                    else if (!Sfound && Afound)
                    {
                        // ----[. A 1]------------------------------------------
                        // S (i,j) is not present, A (i,j) is present
                        // [. A 1]: action: ( insert )
                        // counted here; the tuple itself is built in phase 2
                        task_pending++ ;
                    }
                    else if (Sfound && Afound)
                    {
                        // ----[C A 1] or [X A 1]-------------------------------
                        // both S (i,j) and A (i,j) present
                        // [C A 1]: action: ( =C+A ): apply accum
                        // [X A 1]: action: ( undelete ): zombie lives
                        GB_C_S_LOOKUP ;
                        GB_withaccum_C_A_1_matrix ;
                        GB_NEXT (S) ;
                    }
                }
            }
            GB_PHASE1_TASK_WRAPUP ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // phase1: A is hypersparse, sparse, or full
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:nzombies)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_TASK_DESCRIPTOR_PHASE1 ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t k = kfirst ; k <= klast ; k++)
            {

                //--------------------------------------------------------------
                // get A(:,j) and S(:,j)
                //--------------------------------------------------------------

                int64_t j = GBH (Zh, k) ;
                GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen);
                GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);

                //--------------------------------------------------------------
                // do a 2-way merge of S(:,j) and A(:,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                // int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                // while both list S (:,j) and A (:,j) have entries
                while (pS < pS_end && pA < pA_end)
                {
                    int64_t iS = GBI (Si, pS, Svlen) ;
                    int64_t iA = GBI (Ai, pA, Avlen) ;

                    if (iS < iA)
                    {
                        // ----[C . 1] or [X . 1]-------------------------------
                        // S (i,j) is present but A (i,j) is not
                        // [C . 1]: action: ( C ): no change, with accum
                        // [X . 1]: action: ( X ): still a zombie
                        GB_NEXT (S) ;
                    }
                    else if (iA < iS)
                    {
                        // ----[. A 1]------------------------------------------
                        // S (i,j) is not present, A (i,j) is present
                        // [. A 1]: action: ( insert )
                        task_pending++ ;
                        GB_NEXT (A) ;
                    }
                    else
                    {
                        // ----[C A 1] or [X A 1]-------------------------------
                        // both S (i,j) and A (i,j) present
                        // [C A 1]: action: ( =C+A ): apply accum
                        // [X A 1]: action: ( undelete ): zombie lives
                        GB_C_S_LOOKUP ;
                        GB_withaccum_C_A_1_matrix ;
                        GB_NEXT (S) ;
                        GB_NEXT (A) ;
                    }
                }

                // ignore the remainder of S (:,j)

                // List A (:,j) has entries.  List S (:,j) exhausted.
                // Every leftover A entry becomes a pending tuple.
                task_pending += (pA_end - pA) ;
            }

            GB_PHASE1_TASK_WRAPUP ;
        }
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    GB_PENDING_CUMSUM ;

    if (A_is_bitmap)
    {

        //----------------------------------------------------------------------
        // phase2: A is bitmap
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(&&:pending_sorted)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t j = kfirst ; j <= klast ; j++)
            {

                //--------------------------------------------------------------
                // get S(iA_start:iA_end,j)
                //--------------------------------------------------------------

                GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
                int64_t pA_start = j * Avlen ;

                //--------------------------------------------------------------
                // do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                for (int64_t iA = iA_start ; iA < iA_end ; iA++)
                {
                    int64_t pA = pA_start + iA ;
                    bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
                    bool Afound = Ab [pA] ;
                    if (!Sfound && Afound)
                    {
                        // ----[. A 1]------------------------------------------
                        // S (i,j) is not present, A (i,j) is present
                        // [. A 1]: action: ( insert )
                        int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                        GB_PENDING_INSERT_aij ;
                        GB_NEXT (A) ;
                    }
                    else if (Sfound)
                    {
                        // S (i,j) present
                        GB_NEXT (S) ;
                    }
                }
            }
            GB_PHASE2_TASK_WRAPUP ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // phase2: A is hypersparse, sparse, or full
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(&&:pending_sorted)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_TASK_DESCRIPTOR_PHASE2 ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t k = kfirst ; k <= klast ; k++)
            {

                //--------------------------------------------------------------
                // get A(:,j) and S(:,j)
                //--------------------------------------------------------------

                int64_t j = GBH (Zh, k) ;
                GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen);
                GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);

                //--------------------------------------------------------------
                // do a 2-way merge of S(:,j) and A(:,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                // while both list S (:,j) and A (:,j) have entries
                while (pS < pS_end && pA < pA_end)
                {
                    int64_t iS = GBI (Si, pS, Svlen) ;
                    int64_t iA = GBI (Ai, pA, Avlen) ;

                    if (iS < iA)
                    {
                        GB_NEXT (S) ;
                    }
                    else if (iA < iS)
                    {
                        // ----[. A 1]------------------------------------------
                        // S (i,j) is not present, A (i,j) is present
                        // [. A 1]: action: ( insert )
                        int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                        GB_PENDING_INSERT_aij ;
                        GB_NEXT (A) ;
                    }
                    else
                    {
                        GB_NEXT (S) ;
                        GB_NEXT (A) ;
                    }
                }

                // ignore the remainder of S (:,j)

                // while list A (:,j) has entries.  List S (:,j) exhausted.
                while (pA < pA_end)
                {
                    // ----[. A 1]----------------------------------------------
                    // S (i,j) is not present, A (i,j) is present
                    // [. A 1]: action: ( insert )
                    int64_t iA = GBI (Ai, pA, Avlen) ;
                    int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                    GB_PENDING_INSERT_aij ;
                    GB_NEXT (A) ;
                }
            }

            GB_PHASE2_TASK_WRAPUP ;
        }
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
Par-09-ParallelConsecutiveForLoops.c
int main(int argc, char **argv) { int a[4] = {1,2,3,4}; int b[4] = {0, 0, 0, 0}; #pragma omp parallel { #pragma omp for for (int i = 0; i < 4; ++i) { a[i] = 3*a[i]; } #pragma omp for for (int j = 0; j < 4; ++j) { b[j] = a[j]; } } return 0; }
hnswalg.h
#pragma once

#include "hnswlib.h"
#include "visited_list_pool.h"
#include <random>
#include <iostream>
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unordered_set>
#include <unordered_map>
#include <array>
#include <map>
#include <cmath>
#include <queue>

// Write one trivially-copyable value as raw bytes.
template<typename T>
static void writeBinaryPOD(std::ostream &out, const T &podRef) {
    out.write((char *) &podRef, sizeof(T));
}

// Read one trivially-copyable value as raw bytes.
template<typename T>
static void readBinaryPOD(std::istream &in, T &podRef) {
    in.read((char *) &podRef, sizeof(T));
}

#define DEBUG_LIB 1
namespace hnswlib {
    typedef unsigned int tableint;        // internal element id
    typedef unsigned char linklistsizeint; // neighbor-count prefix of a link list

    // Flat (single-layer in memory) HNSW-style graph index.  Each element
    // occupies size_data_per_element bytes: [link count | maxM_ neighbor ids |
    // vector data].  dist_t is the distance type, vtype the stored scalar type.
    // NOTE(review): unqualified string/vector/mutex/cout suggest a
    // `using namespace std;` in hnswlib.h -- not visible here; confirm.
    template<typename dist_t, typename vtype>
    class HierarchicalNSW {
    public:
        HierarchicalNSW(SpaceInterface<dist_t> *s) {}

        // Load a previously saved index from three files.
        HierarchicalNSW(SpaceInterface<dist_t> *s, const string &infoLocation, const string &dataLocation,
                        const string &edgeLocation, bool nmslib = false)
        {
            LoadInfo(infoLocation, s);
            LoadData(dataLocation);
            LoadEdges(edgeLocation);
        }

        // Build an empty index with capacity maxelements.
        HierarchicalNSW(SpaceInterface<dist_t> *s, size_t maxelements, size_t M, size_t maxM, size_t efConstruction = 200)
        {
            space = s;
            data_size_ = s->get_data_size();

            efConstruction_ = efConstruction;

            maxelements_ = maxelements;
            M_ = M;
            maxM_ = maxM;
            size_links_level0 = maxM * sizeof(tableint) + sizeof(linklistsizeint);
            size_data_per_element = size_links_level0 + data_size_;
            offsetData = size_links_level0;

            // NOTE(review): data_level0_memory_ is read here before it is
            // ever assigned -- indeterminate value, undefined behavior.
            std::cout << (data_level0_memory_ ? 1 : 0) << std::endl;
            // NOTE(review): malloc result is not checked for NULL.
            data_level0_memory_ = (char *) malloc(maxelements_ * size_data_per_element);
            std::cout << (data_level0_memory_ ? 1 : 0) << std::endl;
            cout << "Size Mb: " << (maxelements_ * size_data_per_element) / (1000 * 1000) << "\n";
            cur_element_count = 0;

            visitedlistpool = new VisitedListPool(1, maxelements_);
            //initializations for special treatment of the first node
            // NOTE(review): -1 stored into unsigned tableint wraps to max;
            // the later `!= -1` comparison relies on the same conversion.
            enterpoint_node = -1;
            maxlevel_ = -1;

            elementLevels = vector<char>(maxelements_);
            for (size_t i = 0; i < maxelements_; ++i)
                elementLevels[i] = 0;
        }

        ~HierarchicalNSW()
        {
            free(data_level0_memory_);
            delete visitedlistpool;
        }

        // Fields
        SpaceInterface<dist_t> *space;  // distance functor (not owned)
        size_t maxelements_;            // capacity
        size_t cur_element_count;       // number of inserted elements
        size_t efConstruction_;         // beam width used while building

        int maxlevel_;
        VisitedListPool *visitedlistpool;  // owned; freed in destructor

        mutex cur_element_count_guard_;
        mutex MaxLevelGuard_;

        tableint enterpoint_node;       // graph entry point (-1 sentinel, wrapped)

        size_t dist_calc;               // distance-computation counter (stats)
        char *data_level0_memory_;      // owned flat storage for links + vectors
        vector<char> elementLevels;

        size_t data_size_;              // bytes of one vector
        size_t offsetData;              // offset of vector data within an element
        size_t size_data_per_element;
        size_t M_;
        size_t maxM_;                   // max neighbors per element
        size_t size_links_level0;

        // Pointer to the raw vector of an element.
        inline char *getDataByInternalId(tableint internal_id) const {
            return (data_level0_memory_ + internal_id * size_data_per_element + offsetData);
        }

        // Pointer to the [count | neighbors...] block of an element.
        inline linklistsizeint *get_linklist0(tableint internal_id) const {
            return (linklistsizeint *) (data_level0_memory_ + internal_id * size_data_per_element);
        };

        // Best-first search used during construction: returns up to ef closest
        // candidates to datapoint as a max-heap (worst on top).
        std::priority_queue<std::pair<dist_t, tableint >> searchBaseLayer(tableint ep, void *datapoint, int level, int ef)
        {
            VisitedList *vl = visitedlistpool->getFreeVisitedList();
            vl_type *massVisited = vl->mass;
            vl_type currentV = vl->curV;

            std::priority_queue<std::pair<dist_t, tableint >> topResults;
            std::priority_queue<std::pair<dist_t, tableint >> candidateSet;  // min-heap via negated dist
            dist_t dist = space->fstdistfunc(datapoint, getDataByInternalId(ep));

            topResults.emplace(dist, ep);
            candidateSet.emplace(-dist, ep);
            massVisited[ep] = currentV;
            dist_t lowerBound = dist;

            while (!candidateSet.empty()) {
                std::pair<dist_t, tableint> curr_el_pair = candidateSet.top();
                // closest remaining candidate is worse than the current result
                // set's worst member: done
                if ((-curr_el_pair.first) > lowerBound) {
                    break;
                }
                candidateSet.pop();

                tableint curNodeNum = curr_el_pair.second;
                linklistsizeint *ll_cur = get_linklist0(curNodeNum);
                linklistsizeint size = *ll_cur;
                tableint *data = (tableint *) (ll_cur + 1);

                _mm_prefetch(getDataByInternalId(*data), _MM_HINT_T0);

                for (linklistsizeint j = 0; j < size; ++j) {
                    tableint tnum = *(data + j);

                    _mm_prefetch(getDataByInternalId(*(data + j + 1)), _MM_HINT_T0);

                    if (!(massVisited[tnum] == currentV)) {
                        massVisited[tnum] = currentV;

                        dist_t dist = space->fstdistfunc(datapoint, getDataByInternalId(tnum));

                        // NOTE(review): size_t vs int comparison (size() < ef).
                        if (topResults.top().first > dist || topResults.size() < ef) {
                            candidateSet.emplace(-dist, tnum);

                            _mm_prefetch(getDataByInternalId(candidateSet.top().second), _MM_HINT_T0);
                            topResults.emplace(dist, tnum);

                            if (topResults.size() > ef) {
                                topResults.pop();
                            }
                            lowerBound = topResults.top().first;
                        }
                    }
                }
            }
            visitedlistpool->releaseVisitedList(vl);
            return topResults;
        }

        struct CompareByFirst {
            constexpr bool operator()(pair<dist_t, tableint> const &a,
                                      pair<dist_t, tableint> const &b) const noexcept {
                return a.first < b.first;
            }
        };

        // Search-time variant of searchBaseLayer: counts distance calls and
        // hop statistics, uses CompareByFirst heaps.
        std::priority_queue<std::pair<dist_t, tableint>, vector<pair<dist_t, tableint>>, CompareByFirst>
        searchBaseLayerST(tableint ep, void *datapoint, size_t ef)
        {
            VisitedList *vl = visitedlistpool->getFreeVisitedList();
            vl_type *massVisited = vl->mass;
            vl_type currentV = vl->curV;

            std::priority_queue<std::pair<dist_t, tableint>, vector<pair<dist_t, tableint>>, CompareByFirst> topResults;
            std::priority_queue<std::pair<dist_t, tableint>, vector<pair<dist_t, tableint>>, CompareByFirst> candidateSet;

            dist_t dist = space->fstdistfunc(datapoint, getDataByInternalId(ep));
            dist_calc++;

            topResults.emplace(dist, ep);
            candidateSet.emplace(-dist, ep);
            massVisited[ep] = currentV;
            dist_t lowerBound = dist;

            while (!candidateSet.empty()) {
                hops0 += 1.0 / 100000;  // scaled hop counter (stats only)
                std::pair<dist_t, tableint> curr_el_pair = candidateSet.top();
                if (-curr_el_pair.first > lowerBound)
                    break;

                candidateSet.pop();
                tableint curNodeNum = curr_el_pair.second;

                linklistsizeint *ll_cur = get_linklist0(curNodeNum);
                linklistsizeint size = *ll_cur;
                tableint *data = (tableint *)(ll_cur + 1);

                _mm_prefetch((char *) (massVisited + *data), _MM_HINT_T0);
                _mm_prefetch((char *) (massVisited + *data + 64), _MM_HINT_T0);
                _mm_prefetch(getDataByInternalId(*data), _MM_HINT_T0);

                for (linklistsizeint j = 0; j < size; ++j) {
                    int tnum = *(data + j);

                    _mm_prefetch((char *) (massVisited + *(data + j + 1)), _MM_HINT_T0);
                    _mm_prefetch(getDataByInternalId(*(data + j + 1)), _MM_HINT_T0);

                    if (!(massVisited[tnum] == currentV)) {
                        massVisited[tnum] = currentV;

                        dist_t dist = space->fstdistfunc(datapoint, getDataByInternalId(tnum));
                        dist_calc++;

                        if (topResults.top().first > dist || topResults.size() < ef) {
                            candidateSet.emplace(-dist, tnum);

                            _mm_prefetch(get_linklist0(candidateSet.top().second), _MM_HINT_T0);
                            topResults.emplace(dist, tnum);

                            if (topResults.size() > ef)
                                topResults.pop();

                            lowerBound = topResults.top().first;
                        }
                    }
                }
            }
            visitedlistpool->releaseVisitedList(vl);
            return topResults;
        }

        // Prune topResults down to at most NN neighbors, keeping only
        // candidates that are closer to the query than to any already-kept
        // neighbor (the HNSW diversity heuristic).
        void getNeighborsByHeuristic(std::priority_queue<std::pair<dist_t, tableint>> &topResults, const int NN)
        {
            if (topResults.size() < NN)
                return;

            std::priority_queue<std::pair<dist_t, tableint>> resultSet;  // min-heap via negation
            // NOTE(review): templist is declared but never used in this
            // variant (unlike getNeighborsByHeuristicMerge below).
            std::priority_queue<std::pair<dist_t, tableint>> templist;
            vector<std::pair<dist_t, tableint>> returnlist;

            while (topResults.size() > 0) {
                resultSet.emplace(-topResults.top().first, topResults.top().second);
                topResults.pop();
            }

            while (resultSet.size()) {
                if (returnlist.size() >= NN)
                    break;
                std::pair<dist_t, tableint> curen = resultSet.top();
                dist_t dist_to_query = -curen.first;
                resultSet.pop();
                bool good = true;
                for (std::pair<dist_t, tableint> curen2 : returnlist) {
                    dist_t curdist = space->fstdistfunc(getDataByInternalId(curen2.second),
                                                       getDataByInternalId(curen.second));
                    if (curdist < dist_to_query) {
                        good = false;
                        break;
                    }
                }
                if (good) returnlist.push_back(curen);
            }
            for (std::pair<dist_t, tableint> curen2 : returnlist)
                topResults.emplace(-curen2.first, curen2.second);
        }

        // Link the new element cur_c to its selected neighbors and add
        // back-links, shrinking overfull neighbor lists with the heuristic.
        void mutuallyConnectNewElement(void *datapoint, tableint cur_c,
                                       std::priority_queue<std::pair<dist_t, tableint>> topResults, int level)
        {
            size_t curMmax = maxM_;
            size_t curM = M_;
            getNeighborsByHeuristic(topResults, curM);

            // sanity: heuristic must not return more than curM neighbors
            while (topResults.size() > curM) {
                throw exception();
            }

            vector<tableint> rez;
            rez.reserve(curM);
            while (topResults.size() > 0) {
                rez.push_back(topResults.top().second);
                topResults.pop();
            }
            {
                // forward links: cur_c -> rez[*]; slot must still be empty
                linklistsizeint *ll_cur = get_linklist0(cur_c);
                if (*ll_cur) {
                    cout << *ll_cur << "\n";
                    cout << (int) elementLevels[cur_c] << "\n";
                    cout << level << "\n";
                    throw runtime_error("Should be blank");
                }
                *ll_cur = rez.size();

                tableint *data = (tableint *)(ll_cur + 1);
                for (int idx = 0; idx < rez.size(); idx++) {
                    if (data[idx])
                        throw runtime_error("Should be blank");
                    if (level > elementLevels[rez[idx]])
                        throw runtime_error("Bad level");
                    data[idx] = rez[idx];
                }
            }
            // backward links: rez[idx] -> cur_c
            for (int idx = 0; idx < rez.size(); idx++) {
                if (rez[idx] == cur_c)
                    throw runtime_error("Connection to the same element");

                size_t rezMmax = maxM_;
                linklistsizeint *ll_other = get_linklist0(rez[idx]);
                if (level > elementLevels[rez[idx]])
                    throw runtime_error("Bad level");
                linklistsizeint sz_link_list_other = *ll_other;

                if (sz_link_list_other > rezMmax || sz_link_list_other < 0)
                    throw runtime_error("Bad sz_link_list_other");

                if (sz_link_list_other < rezMmax) {
                    // room left: append
                    tableint *data = (tableint *) (ll_other + 1);
                    data[sz_link_list_other] = cur_c;
                    *ll_other = sz_link_list_other + 1;
                } else {
                    // finding the "weakest" element to replace it with the new one
                    tableint *data = (tableint *) (ll_other + 1);
                    dist_t d_max = space->fstdistfunc(getDataByInternalId(cur_c), getDataByInternalId(rez[idx]));
                    // Heuristic:
                    std::priority_queue<std::pair<dist_t, tableint>> candidates;
                    candidates.emplace(d_max, cur_c);

                    for (int j = 0; j < sz_link_list_other; j++)
                        candidates.emplace(space->fstdistfunc(getDataByInternalId(data[j]),
                                                              getDataByInternalId(rez[idx])), data[j]);

                    getNeighborsByHeuristicMerge(candidates, rezMmax);

                    int indx = 0;
                    while (candidates.size() > 0) {
                        data[indx] = candidates.top().second;
                        candidates.pop();
                        indx++;
                    }
                    *ll_other = indx;
                }
            }
        }

        mutex global;
        size_t ef_;     // beam width used at query time

        // My
        float nev9zka = 0.0;
        tableint enterpoint0;  // entry point found by the last searchKnn
        float hops0 = 0.0;     // scaled hop counter (stats)

        // Insert one vector into the index (thread-safe element allocation).
        void addPoint(void *datapoint, labeltype label)
        {
            tableint cur_c = 0;
            {
                unique_lock <mutex> lock(cur_element_count_guard_);
                if (cur_element_count >= maxelements_) {
                    cout << "The number of elements exceeds the specified limit\n";
                    throw runtime_error("The number of elements exceeds the specified limit");
                };
                cur_c = cur_element_count;
                cur_element_count++;
            }
            int curlevel = elementLevels[cur_c];

            unique_lock <mutex> templock(global);
            int maxlevelcopy = maxlevel_;
            if (curlevel <= maxlevelcopy)
                templock.unlock();

            // zero the links and copy the vector into flat storage
            memset((char *) get_linklist0(cur_c), 0, size_data_per_element);
            memcpy(getDataByInternalId(cur_c), datapoint, data_size_);

            tableint currObj = enterpoint_node;
            // NOTE(review): tableint is unsigned; `!= -1` works only via the
            // implicit wraparound of the -1 sentinel assigned in the ctor.
            if (currObj != -1) {
                if (curlevel < maxlevelcopy) {
                    // greedy descent from the entry point toward datapoint
                    dist_t curdist = space->fstdistfunc(datapoint, getDataByInternalId(currObj));
                    for (int level = maxlevelcopy; level > curlevel; level--) {
                        bool changed = true;
                        while (changed) {
                            changed = false;
                            linklistsizeint *data = get_linklist0(currObj);
                            linklistsizeint size = *data;
                            tableint *datal = (tableint *) (data + 1);
                            for (linklistsizeint i = 0; i < size; i++) {
                                tableint cand = datal[i];
                                // NOTE(review): `cand < 0` is always false for
                                // unsigned tableint; bound check should
                                // probably be `cand >= maxelements_`.
                                if (cand < 0 || cand > maxelements_)
                                    throw runtime_error("cand error");
                                dist_t d = space->fstdistfunc(datapoint, getDataByInternalId(cand));
                                if (d < curdist) {
                                    curdist = d;
                                    currObj = cand;
                                    changed = true;
                                }
                            }
                        }
                    }
                }
                for (int level = 0; level <= min(curlevel, maxlevelcopy); level++) {
                    if (level > maxlevelcopy || level < 0)
                        throw runtime_error("Level error");

                    std::priority_queue<std::pair<dist_t, tableint>> topResults =
                            searchBaseLayer(currObj, datapoint, level, efConstruction_);
                    mutuallyConnectNewElement(datapoint, cur_c, topResults, level);
                }
            } else {
                // Do nothing for the first element
                enterpoint_node = 0;
                maxlevel_ = curlevel;
            }

            //Releasing lock for the maximum level
            if (curlevel > maxlevelcopy) {
                enterpoint_node = cur_c;
                maxlevel_ = curlevel;
            }
        };

        // k-nearest-neighbor query; returns a max-heap of at most k results.
        std::priority_queue<std::pair<dist_t, labeltype >> searchKnn(void *query_data, int k, int q_idx = -1)
        {
            tableint currObj = enterpoint_node;
            dist_t curdist = space->fstdistfunc(query_data, getDataByInternalId(enterpoint_node));
            dist_calc++;

            // greedy descent through the upper levels
            for (int level = maxlevel_; level > 0; level--) {
                bool changed = true;
                while (changed) {
                    changed = false;
                    linklistsizeint *data = get_linklist0(currObj);
                    linklistsizeint size = *data;
                    tableint *datal = (tableint *) (data + 1);
                    for (linklistsizeint i = 0; i < size; i++) {
                        tableint cand = datal[i];
                        // NOTE(review): `cand < 0` always false (unsigned).
                        if (cand < 0 || cand > maxelements_)
                            throw runtime_error("cand error");
                        dist_t d = space->fstdistfunc(query_data, getDataByInternalId(cand));
                        dist_calc++;
                        if (d < curdist) {
                            curdist = d;
                            currObj = cand;
                            changed = true;
                        }
                    }
                }
            }
            enterpoint0 = currObj;  // remember where the base-layer search started

            std::priority_queue<std::pair<dist_t, tableint>, vector<pair<dist_t, tableint>>, CompareByFirst>
                    tmpTopResults = searchBaseLayerST(currObj, query_data, ef_);

            // Remove clusters as answers
            std::priority_queue<std::pair<dist_t, tableint >> topResults;
            while (tmpTopResults.size() > 0) {
                std::pair<dist_t, tableint> rez = tmpTopResults.top();
                topResults.push(rez);
                tmpTopResults.pop();
            }

            while (topResults.size() > k)
                topResults.pop();

            return topResults;
        };

        // Print a histogram of link-list sizes.
        void printListsize()
        {
            float av_M = 0;
            int numLinks[32];
            for (int i = 0; i < 32; i++)
                numLinks[i] = 0;

            for (int i = 0; i < maxelements_; i++){
                linklistsizeint *ll_cur = get_linklist0(i);
                // NOTE(review): indexes numLinks[*ll_cur - 1]; out of bounds
                // if a list is empty (count 0) or longer than 32 -- confirm
                // maxM_ <= 32 and no empty lists before relying on this.
                numLinks[*ll_cur - 1]++;
                av_M += (1.0 * *ll_cur) / maxelements_;
            }

            std::cout << "Links distribution" << std::endl;
            for (int i = 0; i < 32; i++){
                cout << "  Number of elements with " << i+1 << " links: " << numLinks[i] << endl;
            }
        }

        // Persist scalar index metadata (sizes/offsets) as raw PODs.
        void SaveInfo(const string &location)
        {
            cout << "Saving info to " << location << endl;
            std::ofstream output(location, std::ios::binary);
            streampos position;

            writeBinaryPOD(output, maxelements_);
            writeBinaryPOD(output, enterpoint_node);
            writeBinaryPOD(output, data_size_);
            writeBinaryPOD(output, offsetData);
            writeBinaryPOD(output, size_data_per_element);
            writeBinaryPOD(output, M_);
            writeBinaryPOD(output, maxM_);
            writeBinaryPOD(output, size_links_level0);

            output.close();
        }

        // Persist adjacency lists: per element, an int count then the ids.
        void SaveEdges(const string &location)
        {
            cout << "Saving edges to " << location << endl;
            // NOTE(review): fopen result unchecked and never fclose'd --
            // data may not be flushed; resource leak.
            FILE *fout = fopen(location.c_str(), "wb");
            for (tableint i = 0; i < maxelements_; i++) {
                linklistsizeint *ll_cur = get_linklist0(i);
                int size = *ll_cur;

                fwrite((int *)&size, sizeof(int), 1, fout);
                tableint *data = (tableint *)(ll_cur + 1);
                fwrite(data, sizeof(tableint), *ll_cur, fout);
            }
        }

        // Load metadata written by SaveInfo and allocate flat storage.
        void LoadInfo(const string &location, SpaceInterface<dist_t> *s)
        {
            cout << "Loading info from " << location << endl;
            std::ifstream input(location, std::ios::binary);
            streampos position;

            space = s;
            data_size_ = s->get_data_size();

            readBinaryPOD(input, maxelements_);
            readBinaryPOD(input, enterpoint_node);
            // NOTE(review): data_size_ read from file overwrites the value
            // just taken from the space; mismatch is silently accepted.
            readBinaryPOD(input, data_size_);
            readBinaryPOD(input, offsetData);
            readBinaryPOD(input, size_data_per_element);
            readBinaryPOD(input, M_);
            readBinaryPOD(input, maxM_);
            readBinaryPOD(input, size_links_level0);

            // NOTE(review): malloc unchecked.
            data_level0_memory_ = (char *) malloc(maxelements_ * size_data_per_element);

            efConstruction_ = 0;
            cur_element_count = maxelements_;

            visitedlistpool = new VisitedListPool(1, maxelements_);

            elementLevels = vector<char>(maxelements_);
            for (size_t i = 0; i < maxelements_; ++i)
                elementLevels[i] = 0;
            maxlevel_ = 0;
            cout << "Predicted size=" << maxelements_ * size_data_per_element / (1000 * 1000) << "\n";
            input.close();
        }

        // Load raw vectors written as [dim | dim scalars] records.
        void LoadData(const string &location)
        {
            cout << "Loading data from " << location << endl;
            // NOTE(review): fopen/fread unchecked, no fclose; `vtype mass[D]`
            // is a VLA, which is a compiler extension in C++.
            FILE *fin = fopen(location.c_str(), "rb");
            int dim;
            const int D = space->get_data_dim();
            vtype mass[D];

            for (tableint i = 0; i < maxelements_; i++) {
                fread((int *) &dim, sizeof(int), 1, fin);
                if (dim != D)
                    cerr << "Wront data dim" << endl;  // NOTE(review): typo "Wront" in message

                fread(mass, sizeof(vtype), dim, fin);
                memset((char *) get_linklist0(i), 0, size_data_per_element);
                memcpy(getDataByInternalId(i), mass, data_size_);
            }
        }

        // Load adjacency lists written by SaveEdges.
        void LoadEdges(const string &location)
        {
            cout << "Loading edges from " << location << endl;
            // NOTE(review): fopen/fread unchecked, no fclose.
            FILE *fin = fopen(location.c_str(), "rb");
            int size;

            for (tableint i = 0; i < maxelements_; i++) {
                fread((int *)&size, sizeof(int), 1, fin);
                linklistsizeint *ll_cur = get_linklist0(i);
                *ll_cur = size;
                tableint *data = (tableint *)(ll_cur + 1);

                fread((tableint *)data, sizeof(tableint), size, fin);
            }
        }

        // Same diversity pruning as getNeighborsByHeuristic, but rejected
        // candidates are kept in templist and used to top the result back up
        // to NN if the strict heuristic under-fills it.
        void getNeighborsByHeuristicMerge(std::priority_queue<std::pair<dist_t, tableint>> &topResults, const int NN)
        {
            if (topResults.size() < NN)
                return;

            std::priority_queue<std::pair<dist_t, tableint>> resultSet;
            std::priority_queue<std::pair<dist_t, tableint>> templist;
            std::vector<std::pair<dist_t, tableint>> returnlist;

            while (topResults.size() > 0) {
                resultSet.emplace(-topResults.top().first, topResults.top().second);
                topResults.pop();
            }

            while (resultSet.size()) {
                if (returnlist.size() >= NN)
                    break;
                std::pair<dist_t, tableint> curen = resultSet.top();
                dist_t dist_to_query = -curen.first;
                resultSet.pop();
                bool good = true;
                for (std::pair<dist_t, tableint> curen2 : returnlist) {
                    dist_t curdist = space->fstdistfunc(getDataByInternalId(curen2.second),
                                                       getDataByInternalId(curen.second));
                    if (curdist < dist_to_query) {
                        good = false;
                        break;
                    }
                }
                if (good)
                    returnlist.push_back(curen);
                else
                    templist.emplace(curen);
            }
            // refill from rejected candidates if under NN
            while (returnlist.size() < NN && templist.size() > 0) {
                returnlist.push_back(templist.top());
                templist.pop();
            }
            for (std::pair<dist_t, tableint> curen2 : returnlist)
                topResults.emplace(-curen2.first, curen2.second);
        }

        // Merge the edge sets of another index holding the same points in
        // reverse order (element i here == element maxelements_-1-i there).
        void merge(const HierarchicalNSW<dist_t, vtype> *hnsw)
        {
            int counter = 0;
//#pragma omp parallel for
            for (int i = 0; i < maxelements_; i++){
                float *data = (float *) getDataByInternalId(i);
                linklistsizeint *ll1 = get_linklist0(i);
                linklistsizeint *ll2 = hnsw->get_linklist0(maxelements_- 1 - i);

                // both indices must contain the same vector at mirrored ids
                float identity = space->fstdistfunc((void *)data,
                                                    (void *)hnsw->getDataByInternalId(maxelements_- 1 - i));
                if (identity > 0.0000001){
                    std::cout << "Merging different points\n";
                    exit(1);
                }
                size_t size1 = *ll1;
                size_t size2 = *ll2;
                // NOTE(review): neighbor ids are stored as tableint but read
                // here through labeltype* -- only safe if the two types have
                // identical size; confirm against hnswlib.h.
                labeltype *links1 = (labeltype *)(ll1 + 1);
                labeltype *links2 = (labeltype *)(ll2 + 1);

                std::unordered_set<labeltype> links;
                for (labeltype link = 0; link < size1; link++)
                    links.insert(links1[link]);
                for (labeltype link = 0; link < size2; link++)
                    links.insert(maxelements_- 1 - links2[link]);  // remap mirrored ids

                if (links.size() <= maxM_){
                    // union fits: store it directly
                    int indx = 0;
                    for (labeltype link : links)
                        links1[indx++] = link;
                    *ll1 = indx;
                } else {
                    // union too large: prune with the diversity heuristic
                    std::priority_queue<std::pair<dist_t, tableint>> topResults;
                    for (labeltype link : links){
                        float *point = (float *) getDataByInternalId(link);
                        dist_t dist = space->fstdistfunc((void *)data, (void *)point);
                        topResults.emplace(std::make_pair(dist, link));
                    }
                    getNeighborsByHeuristicMerge(topResults, maxM_);

                    int indx = 0;
                    while (topResults.size() > 0) {
                        links1[indx++] = topResults.top().second;
                        topResults.pop();
                    }
                    *ll1 = indx;
                }
                if (*ll1 < maxM_)
                    counter++;
            }
            std::cout << counter << std::endl;
        }
    };
}
misc.h
/**
 * \file misc.h
 * \brief Helper functions.
 *
 * Core word type, bit-manipulation macros, and small inline helpers
 * for the M4RI linear-algebra-over-GF(2) library.
 *
 * \author Gregory Bard <bard@fordham.edu>
 * \author Martin Albrecht <M.R.Albrecht@rhul.ac.uk>
 */

#ifndef MISC_H
#define MISC_H

/*******************************************************************
 *
 *            M4RI: Linear Algebra over GF(2)
 *
 *    Copyright (C) 2007, 2008 Gregory Bard <bard@fordham.edu>
 *    Copyright (C) 2008 Martin Albrecht <M.R.Albrecht@rhul.ac.uk>
 *
 *  Distributed under the terms of the GNU General Public License (GPL)
 *  version 2 or higher.
 *
 *    This code is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *    General Public License for more details.
 *
 *  The full text of the GPL is available at:
 *
 *                  http://www.gnu.org/licenses/
 *
 ********************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef HAVE_MM_MALLOC
#include <mm_malloc.h>
#endif

#include <stdlib.h>
#include <assert.h>
#include <string.h>

/*
 * These define entirely the word width used in the library.
 */

/**
 * A word is the typical packed data structure to represent packed
 * bits.
 */

typedef unsigned long long word;

/**
 * \brief The number of bits in a word.
 */

#define RADIX (sizeof(word)<<3)

/**
 * \brief The number one as a word.
 */

#define ONE ((word)1)

/**
 * \brief The number 2^64-1 as a word (all bits set).
 */

#define FFFF ((word)0xffffffffffffffffull)

/**
 * \brief Return the maximal element of x and y
 *
 * \param x Word
 * \param y Word
 */

#ifndef MAX
#define MAX(x,y) (((x) > (y))?(x):(y))
#endif

/**
 * \brief Return the minimal element of x and y
 *
 * \param x Word
 * \param y Word
 */

#ifndef MIN
#define MIN(x,y) (((x) < (y))?(x):(y))
#endif

/**
 * \brief Return r such that x elements fit into r blocks of length y
 * (ceiling division).
 *
 * \param x Number of elements
 * \param y Block size
 */

#define DIV_CEIL(x,y) (((x)%(y))?(x)/(y)+1:(x)/(y))

/**
 *\brief Pretty for 1.
 */

#define TRUE 1

/**
 *\brief Pretty for 0.
 */

#define FALSE 0

/**
 * \brief $2^i$
 *
 * \param i Integer.
 */

#define TWOPOW(i) (ONE<<(i))

/**
 * \brief Pretty for unsigned char.
 */

typedef unsigned char BIT;

/**
 * \brief Clear the bit spot (counting from the left) in the word w
 *
 * \param w Word
 * \param spot Integer with 0 <= spot < RADIX
 */

#define CLR_BIT(w, spot) ((w) &= ~(ONE<<(RADIX - (spot) - 1)))

/**
 * \brief Set the bit spot (counting from the left) in the word w
 *
 * \param w Word
 * \param spot Integer with 0 <= spot < RADIX
 */

#define SET_BIT(w, spot) ((w) |= (ONE<<(RADIX - (spot) - 1)))

/**
 * \brief Get the bit spot (counting from the left) in the word w
 *
 * \param w Word
 * \param spot Integer with 0 <= spot < RADIX
 */

#define GET_BIT(w, spot) (((w) & (ONE<<(RADIX - (spot) - 1))) >> (RADIX - (spot) - 1))

/**
 * \brief Write the value to the bit spot in the word w
 *
 * \param w Word.
 * \param spot Integer with 0 <= spot < RADIX.
 * \param value Either 0 or 1.
 */

#define WRITE_BIT(w, spot, value) ((w) = (((w) &~(ONE<<(RADIX - (spot) - 1))) | (((word)(value))<<(RADIX - (spot) - 1))))

/**
 * \brief Flip the spot in the word w
 *
 * \param w Word.
 * \param spot Integer with 0 <= spot < RADIX.
 */

#define FLIP_BIT(w, spot) ((w) ^= (ONE<<(RADIX - (spot) - 1)))

/**
 * \brief Return the n leftmost bits of the word w.
 *
 * \param w Word
 * \param n Integer with 0 <= spot < RADIX
 *
 * NOTE(review): the expansion is not wrapped in outer parentheses, so
 * the trailing `>>` can bind unexpectedly when this macro is embedded
 * in a larger expression (e.g. LEFTMOST_BITS(w,n) + 1 parses the `+ 1`
 * into the shift amount) — confirm all call sites before relying on it.
 */

#define LEFTMOST_BITS(w, n) ((w) & ~((ONE<<(RADIX-(n)))-1))>>(RADIX-(n))

/**
 * \brief Return the n rightmost bits of the word w.
 *
 * \param w Word
 * \param n Integer with 0 <= spot < RADIX
 *
 * NOTE(review): the shift amount is RADIX-(n)-1, which keeps n+1 low
 * bits rather than n — presumably callers pass a 0-based index; verify
 * the intended off-by-one against the call sites.
 */

#define RIGHTMOST_BITS(w, n) (((w)<<(RADIX-(n)-1))>>(RADIX-(n)-1))

/**
 * \brief create a bit mask to zero out all but the n%RADIX leftmost
 * bits.
 *
 * \param n Integer
 */

#define LEFT_BITMASK(n) (~((ONE << ((RADIX - (n % RADIX))%RADIX) ) - 1))

/**
 * \brief create a bit mask to zero out all but the n%RADIX rightmost
 * bits.
 *
 * \param n Integer
 *
 * \warning Does not handle multiples of RADIX correctly
 */

#define RIGHT_BITMASK(n) (FFFF>>( (RADIX - (n%RADIX))%RADIX ))

/**
 * \brief create a bit mask to zero out all but the n%RADIX bit.
 *
 * \param n Integer
 */

#define BITMASK(n) (ONE<<(RADIX-((n)%RADIX)-1))

/**
 * \brief Return alignment of addr w.r.t. n.  For example the address
 * 17 would be 1 aligned w.r.t. 16.
 *
 * \param addr
 * \param n
 */

#define ALIGNMENT(addr, n) (((unsigned long)(addr))%(n))

/**
 * Return the index of the leftmost bit in a for a nonzero.
 *
 * Binary-search style scan over halves of the word.  Note the result
 * is 1-based (e.g. a==1 returns 1), i.e. the bit length of a; a==0
 * returns 0.
 *
 * \param a Word
 */

static inline int leftmost_bit(word a) {
  int r = 0;
  if(a>>32) r+=32, a>>=32;
  if(a>>16) r+=16, a>>=16;
  if(a>>8)  r+=8,  a>>=8;
  if(a>>4)  r+=4,  a>>=4;
  if(a>>2)  r+=2,  a>>=2;
  if(a>>1)  r+=1,  a>>=1;
  if(a)     r+=1,  a>>=1;
  return r;
}

/**** Error Handling *****/

/**
 * \brief Print error message and abort().
 *
 * The function accepts additional
 * parameters like printf, so e.g. m4ri_die("foo %d bar %f\n",1 ,2.0)
 * is valid and will print the string "foo 1 bar 2.0" before dying.
 *
 * \param errormessage a string to be printed.
 *
 * \todo Allow user to register callback which is called on
 * m4ri_die().
 *
 * \warning The provided string is not free'd.
 */

void m4ri_die(const char *errormessage, ...);

/**** IO *****/

/**
 * \brief Write a string representing the word data to destination.
 *
 * \param destination Address of buffer of length at least RADIX*1.3
 * \param data Source word
 * \param colon Insert a Colon after every 4-th bit.
 * \warning Assumes destination has RADIX*1.3 bytes available
 */

void m4ri_word_to_str( char *destination, word data, int colon);

/**
 * \brief Return 1 or 0 uniformly randomly distributed.
 *
 * Implemented via rand(); callers must have seeded the C PRNG.
 *
 * \todo Allow user to provide her own random() function.
 */

//BIT m4ri_coin_flip(void);
static inline BIT m4ri_coin_flip() {
  if (rand() < RAND_MAX/2) {
    return 0;
  } else {
    return 1;
  }
}

/**
 * \brief Return uniformly randomly distributed random word.
 *
 * \todo Allow user to provide her own random() function.
*/ /* void *m4ri_mm_malloc( int size ); */ static inline void *m4ri_mm_malloc( int size ) { void *newthing; #ifdef HAVE_OPENMP #pragma omp critical { #endif #ifdef HAVE_MM_MALLOC newthing = _mm_malloc(size, 16); #else newthing = malloc( size ); #endif #ifdef HAVE_OPENMP } #endif if (newthing==NULL && (size>0)) { m4ri_die("m4ri_mm_malloc: malloc returned NULL\n"); return NULL; /* unreachable */ } else return newthing; } /** * \brief Free wrapper. * * \param condemned Pointer. * * \todo Allow user to register free function. */ /* void m4ri_mm_free(void *condemned, ...); */ static inline void m4ri_mm_free(void *condemned, ...) { #ifdef HAVE_OPENMP #pragma omp critical { #endif #ifdef HAVE_MM_MALLOC _mm_free(condemned); #else free(condemned); #endif #ifdef HAVE_OPENMP } #endif } /** * \brief Maximum number of bytes allocated in one malloc() call. */ #define MM_MAX_MALLOC ((1ULL)<<30) /** * \brief Enable memory block cache (default: disabled) */ #define ENABLE_MMC /** * \brief Number of blocks that are cached. */ #define M4RI_MMC_NBLOCKS 16 /** * \brief Maximal size of blocks stored in cache. */ #define M4RI_MMC_THRESHOLD CPU_L2_CACHE /** * The mmc memory management functions check a cache for re-usable * unused memory before asking the system for it. */ typedef struct _mm_block { /** * Size in bytes of the data. */ size_t size; /** * Pointer to buffer of data. */ void *data; } mmb_t; /** * The actual memory block cache. */ extern mmb_t m4ri_mmc_cache[M4RI_MMC_NBLOCKS]; /** * \brief Return handle for locale memory management cache. * * \attention Not thread safe. */ static inline mmb_t *m4ri_mmc_handle(void) { return m4ri_mmc_cache; } /** * \brief Allocate size bytes. * * \param size Number of bytes. 
*/ static inline void *m4ri_mmc_malloc(size_t size) { #ifdef HAVE_OPENMP #pragma omp critical { #endif #ifdef ENABLE_MMC mmb_t *mm = m4ri_mmc_handle(); if (size <= M4RI_MMC_THRESHOLD) { size_t i; for (i=0; i<M4RI_MMC_NBLOCKS; i++) { if(mm[i].size == size) { void *ret = mm[i].data; mm[i].data = NULL; mm[i].size = 0; return ret; } } } #endif //ENABLE_MMC #ifdef HAVE_OPENMP } #endif return m4ri_mm_malloc(size); } /** * \brief Allocate size times count zeroed bytes. * * \param size Number of bytes per block. * \param count Number of blocks. * * \warning Not thread safe. */ static inline void *m4ri_mmc_calloc(size_t size, size_t count) { void *ret = m4ri_mmc_malloc(size*count); memset((char*)ret, 0, count*size); return ret; } /** * \brief Free the data pointed to by condemned of the given size. * * \param condemned Pointer to memory. * \param size Number of bytes. * * \warning Not thread safe. */ static inline void m4ri_mmc_free(void *condemned, size_t size) { #ifdef HAVE_OPENMP #pragma omp critical { #endif #ifdef ENABLE_MMC static size_t j = 0; mmb_t *mm = m4ri_mmc_handle(); if (size < M4RI_MMC_THRESHOLD) { size_t i; for(i=0; i<M4RI_MMC_NBLOCKS; i++) { if(mm[i].size == 0) { mm[i].size = size; mm[i].data = condemned; return; } } m4ri_mm_free(mm[j].data); mm[j].size = size; mm[j].data = condemned; j = (j+1) % M4RI_MMC_NBLOCKS; return; } #endif //ENABLE_MMC #ifdef HAVE_OPENMP } #endif m4ri_mm_free(condemned); } /** * \brief Cleans up the cache. * * This function is called automatically when the shared library is * loaded. * * \warning Not thread safe. */ static inline void m4ri_mmc_cleanup(void) { #ifdef HAVE_OPENMP #pragma omp critical { #endif mmb_t *mm = m4ri_mmc_handle(); size_t i; for(i=0; i < M4RI_MMC_NBLOCKS; i++) { if (mm[i].size) m4ri_mm_free(mm[i].data); mm[i].size = 0; } #ifdef HAVE_OPENMP } #endif } #endif //MISC_H
omp_for_collapse.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <math.h> #include "omp_testsuite.h" /* Utility function to check that i is increasing monotonically with each call */ static int check_i_islarger (int i) { static int last_i; int islarger; if (i==1) last_i=0; islarger = ((i >= last_i)&&(i - last_i<=1)); last_i = i; return (islarger); } int test_omp_for_collapse() { int is_larger = 1; #pragma omp parallel { int i,j; int my_islarger = 1; #pragma omp for private(i,j) schedule(static,1) collapse(2) ordered for (i = 1; i < 100; i++) { for (j =1; j <100; j++) { #pragma omp ordered my_islarger = check_i_islarger(i)&&my_islarger; } } #pragma omp critical is_larger = is_larger && my_islarger; } return (is_larger); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_for_collapse()) { num_failed++; } } return num_failed; }
opi.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> int main(int argc, char **argv) { //seed random number generator // Q2b: get the number of threads to run with from argv and // add OpenMP API code to set number of threads here int Nthreads = atoi(argv[1]); // OpenMP call to set number of threads omp_set_num_threads(Nthreads); struct drand48_data *drandData; drandData = (struct drand48_data*) malloc(Nthreads*sizeof(struct drand48_data)); // Q2c: add an OpenMP parallel region here, wherein each thread initializes // one entry in drandData using srand48_r and seed based on thread number #pragma omp parallel { int rank = omp_get_thread_num(); long int seed = rank; srand48_r(seed, drandData+rank); } long long int Ntrials = 10000000; //need running tallies long long int Ntotal=0; long long int Ncircle=0; // start time double startTime = omp_get_wtime(); #pragma omp parallel for reduction(+:Ncircle) for (long long int n=0; n<Ntrials; n++) { double rand1; double rand2; int rank = omp_get_thread_num(); //generate two random numbers (use the thread id to offset drandData) drand48_r(drandData+rank, &rand1); drand48_r(drandData+rank, &rand2); double x = -1 + 2*rand1; //shift to [-1,1] double y = -1 + 2*rand2; //check if its in the circle if (sqrt(x*x+y*y)<=1) Ncircle++; Ntotal++; if (n%100 ==0) { double pi = 4.0*Ncircle/ (double) (n); printf("Our estimate of pi is %g \n", pi); } // end if } // end for loop // back to serial double pi = 4.0*Ncircle/ (double) (Ntotal); printf("Our final estimate of pi is %g \n", pi); // end time double endTime = omp_get_wtime(); // print total run time double runtime = endTime - startTime; printf("Runtime = %g \n", runtime); // free the data free(drandData); return 0; }
Square.c
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/Square.c" #else static int nn_(Square_updateOutput)(lua_State *L) { THTensor *input = luaT_checkudata(L, 2, torch_Tensor); THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor); THTensor_(resizeAs)(output, input); if (input->nDimension == 1 || !THTensor_(isContiguous)(input) || !THTensor_(isContiguous)(output)) { TH_TENSOR_APPLY2(real, output, real, input, \ *output_data = (*input_data) * (*input_data);); } else { real* output_data = THTensor_(data)(output); real* input_data = THTensor_(data)(input); long i; #pragma omp parallel for private(i) for(i = 0; i < THTensor_(nElement)(input); i++) output_data[i] = input_data[i]*input_data[i]; } return 1; } static int nn_(Square_updateGradInput)(lua_State *L) { THTensor *input = luaT_checkudata(L, 2, torch_Tensor); THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor); THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor); THTensor_(resizeAs)(gradInput, input); if (input->nDimension == 1 || !THTensor_(isContiguous)(input) || !THTensor_(isContiguous)(gradOutput) || !THTensor_(isContiguous)(gradInput)) { TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input, \ *gradInput_data = 2.0 * (*gradOutput_data) * (*input_data);); } else { real* gradOutput_data = THTensor_(data)(gradOutput); real* gradInput_data = THTensor_(data)(gradInput); real* input_data = THTensor_(data)(input); long i; #pragma omp parallel for private(i) for(i = 0; i < THTensor_(nElement)(gradInput); i++) gradInput_data[i] = 2.0 * gradOutput_data[i] * input_data[i]; } return 1; } static const struct luaL_Reg nn_(Square__) [] = { {"Square_updateOutput", nn_(Square_updateOutput)}, {"Square_updateGradInput", nn_(Square_updateGradInput)}, {NULL, NULL} }; static void nn_(Square_init)(lua_State *L) { luaT_pushmetatable(L, torch_Tensor); luaT_registeratname(L, nn_(Square__), "nn"); lua_pop(L,1); } #endif
GB_binop__le_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__le_int64) // A.*B function (eWiseMult): GB (_AemultB_08__le_int64) // A.*B function (eWiseMult): GB (_AemultB_02__le_int64) // A.*B function (eWiseMult): GB (_AemultB_04__le_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__le_int64) // A*D function (colscale): GB (_AxD__le_int64) // D*A function (rowscale): GB (_DxB__le_int64) // C+=B function (dense accum): GB (_Cdense_accumB__le_int64) // C+=b function (dense accum): GB (_Cdense_accumb__le_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_int64) // C=scalar+B GB (_bind1st__le_int64) // C=scalar+B' GB (_bind1st_tran__le_int64) // C=A+scalar GB (_bind2nd__le_int64) // C=A'+scalar GB (_bind2nd_tran__le_int64) // C type: bool // A type: int64_t // B,b type: int64_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 
// aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LE || GxB_NO_INT64 || GxB_NO_LE_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__le_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__le_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__le_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__le_int64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, 
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__le_int64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__le_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__le_int64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const 
GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__le_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__le_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__le_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__le_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__le_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__le_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__le_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
statistic.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC % % SS T A A T I SS T I C % % SSS T AAAAA T I SSS T I C % % SS T A A T I SS T I C % % SSSSS T A A T IIIII SSSSS T IIIII CCCC % % % % % % MagickCore Image Statistical Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/animate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/compress.h" #include "MagickCore/constitute.h" #include "MagickCore/display.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/image-private.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/semaphore.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/timer.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E v a l u a t e I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EvaluateImage() applies a value to the image with an arithmetic, relational,
%  or logical operator to an image.  Use these operations to lighten or darken
%  an image, to increase or decrease contrast in an image, or to produce the
%  "negative" of an image.
%
%  The format of the EvaluateImage method is:
%
%      MagickBooleanType EvaluateImage(Image *image,
%        const MagickEvaluateOperator op,const double value,
%        ExceptionInfo *exception)
%      MagickBooleanType EvaluateImages(Image *images,
%        const MagickEvaluateOperator op,const double value,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o op: A channel op.
%
%    o value: a value.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Per-pixel scratch record: one accumulator per pixel channel.  Arrays of
  these are allocated per worker thread (see AcquirePixelThreadSet).
*/
typedef struct _PixelChannels
{
  double
    channel[MaxPixelChannels];
} PixelChannels;

/*
  Release the per-thread pixel scratch set allocated by
  AcquirePixelThreadSet(); returns NULL for convenient assignment.
*/
static PixelChannels **DestroyPixelThreadSet(const Image *images,
  PixelChannels **pixels)
{
  ssize_t
    i;

  size_t
    rows;

  assert(pixels != (PixelChannels **) NULL);
  /* must mirror the row count computed in AcquirePixelThreadSet() */
  rows=MagickMax(GetImageListLength(images),(size_t)
    GetMagickResourceLimit(ThreadResource));
  for (i=0; i < (ssize_t) rows; i++)
    if (pixels[i] != (PixelChannels *) NULL)
      pixels[i]=(PixelChannels *) RelinquishMagickMemory(pixels[i]);
  pixels=(PixelChannels **) RelinquishMagickMemory(pixels);
  return(pixels);
}

/*
  Allocate one zero-initialized scratch row per worker thread (and at least
  one per image in the list).  Each row is wide enough for the widest image
  in the list, so a thread can accumulate a full scanline.  Returns NULL on
  allocation failure (partial allocations are released).
*/
static PixelChannels **AcquirePixelThreadSet(const Image *images)
{
  const Image
    *next;

  PixelChannels
    **pixels;

  ssize_t
    i;

  size_t
    columns,
    number_images,
    rows;

  number_images=GetImageListLength(images);
  rows=MagickMax(number_images,(size_t)
    GetMagickResourceLimit(ThreadResource));
  pixels=(PixelChannels **) AcquireQuantumMemory(rows,sizeof(*pixels));
  if (pixels == (PixelChannels **) NULL)
    return((PixelChannels **) NULL);
  (void) memset(pixels,0,rows*sizeof(*pixels));
  /* row width: max of image count, channel count, and widest image */
  columns=MagickMax(number_images,MaxPixelChannels);
  for (next=images; next != (Image *) NULL; next=next->next)
    columns=MagickMax(next->columns,columns);
  for (i=0; i < (ssize_t) rows; i++)
  {
    ssize_t
      j;

    pixels[i]=(PixelChannels *) AcquireQuantumMemory(columns,
      sizeof(**pixels));
    if (pixels[i] == (PixelChannels *) NULL)
      return(DestroyPixelThreadSet(images,pixels));
    for (j=0; j < (ssize_t) columns; j++)
    {
      ssize_t
        k;

      for (k=0; k < MaxPixelChannels; k++)
        pixels[i][j].channel[k]=0.0;
    }
  }
  return(pixels);
}

/* Branch-style maximum of two doubles. */
static inline double EvaluateMax(const double x,const double y)
{
  if (x > y)
    return(x);
  return(y);
}

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  qsort() comparator: orders PixelChannels records by the signed sum of their
  channel differences (a rough intensity ordering used for the median path).
*/
static int IntensityCompare(const void *x,const void *y)
{
  const PixelChannels
    *color_1,
    *color_2;

  double
    distance;

  ssize_t
    i;

  color_1=(const PixelChannels *) x;
  color_2=(const PixelChannels *) y;
  distance=0.0;
  for (i=0; i < MaxPixelChannels; i++)
    distance+=color_1->channel[i]-(double) color_2->channel[i];
  return(distance < 0.0 ? -1 : distance > 0.0 ? 1 : 0);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

/*
  Apply a single evaluate operator to one quantum.  random_info supplies
  entropy for the *Noise operators; value is the operator's right-hand
  argument.  The result is unclamped; callers ClampToQuantum() it.
*/
static double ApplyEvaluateOperator(RandomInfo *random_info,const Quantum pixel,
  const MagickEvaluateOperator op,const double value)
{
  double
    result;

  ssize_t
    i;

  result=0.0;
  switch (op)
  {
    case UndefinedEvaluateOperator:
      break;
    case AbsEvaluateOperator:
    {
      result=(double) fabs((double) (pixel+value));
      break;
    }
    case AddEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case AddModulusEvaluateOperator:
    {
      /*
        This returns a 'floored modulus' of the addition which is a positive
        result.  It differs from % or fmod() that returns a 'truncated
        modulus' result, where floor() is replaced by trunc() and could
        return a negative result (which is clipped).
      */
      result=pixel+value;
      result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
      break;
    }
    case AndEvaluateOperator:
    {
      /* value+0.5 rounds the operand before the bitwise op */
      result=(double) ((ssize_t) pixel & (ssize_t) (value+0.5));
      break;
    }
    case CosineEvaluateOperator:
    {
      result=(double) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case DivideEvaluateOperator:
    {
      /* guard against division by zero */
      result=pixel/(value == 0.0 ? 1.0 : value);
      break;
    }
    case ExponentialEvaluateOperator:
    {
      result=(double) (QuantumRange*exp((double) (value*QuantumScale*pixel)));
      break;
    }
    case GaussianNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,GaussianNoise,
        value);
      break;
    }
    case ImpulseNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,ImpulseNoise,
        value);
      break;
    }
    case InverseLogEvaluateOperator:
    {
      result=(QuantumRange*pow((value+1.0),QuantumScale*pixel)-1.0)*
        PerceptibleReciprocal(value);
      break;
    }
    case LaplacianNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        LaplacianNoise,value);
      break;
    }
    case LeftShiftEvaluateOperator:
    {
      /* shift implemented as repeated doubling to stay in double domain */
      result=(double) pixel;
      for (i=0; i < (ssize_t) value; i++)
        result*=2.0;
      break;
    }
    case LogEvaluateOperator:
    {
      /* result stays 0.0 for near-black pixels to avoid log() blow-up */
      if ((QuantumScale*pixel) >= MagickEpsilon)
        result=(double) (QuantumRange*log((double) (QuantumScale*value*pixel+
          1.0))/log((double) (value+1.0)));
      break;
    }
    case MaxEvaluateOperator:
    {
      result=(double) EvaluateMax((double) pixel,value);
      break;
    }
    case MeanEvaluateOperator:
    {
      /* accumulate only; callers divide by the sample count */
      result=(double) (pixel+value);
      break;
    }
    case MedianEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case MinEvaluateOperator:
    {
      result=(double) MagickMin((double) pixel,value);
      break;
    }
    case MultiplicativeNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        MultiplicativeGaussianNoise,value);
      break;
    }
    case MultiplyEvaluateOperator:
    {
      result=(double) (value*pixel);
      break;
    }
    case OrEvaluateOperator:
    {
      result=(double) ((ssize_t) pixel | (ssize_t) (value+0.5));
      break;
    }
    case PoissonNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,PoissonNoise,
        value);
      break;
    }
    case PowEvaluateOperator:
    {
      /* preserve sign for negative (HDRI) pixel values */
      if (pixel < 0)
        result=(double) -(QuantumRange*pow((double) -(QuantumScale*pixel),
          (double) value));
      else
        result=(double) (QuantumRange*pow((double) (QuantumScale*pixel),
          (double) value));
      break;
    }
    case RightShiftEvaluateOperator:
    {
      result=(double) pixel;
      for (i=0; i < (ssize_t) value; i++)
        result/=2.0;
      break;
    }
    case RootMeanSquareEvaluateOperator:
    {
      /* accumulate squares; callers take sqrt of the mean */
      result=((double) pixel*pixel+value);
      break;
    }
    case SetEvaluateOperator:
    {
      result=value;
      break;
    }
    case SineEvaluateOperator:
    {
      result=(double) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case SubtractEvaluateOperator:
    {
      result=(double) (pixel-value);
      break;
    }
    case SumEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case ThresholdEvaluateOperator:
    {
      result=(double) (((double) pixel <= value) ? 0 : QuantumRange);
      break;
    }
    case ThresholdBlackEvaluateOperator:
    {
      result=(double) (((double) pixel <= value) ? 0 : pixel);
      break;
    }
    case ThresholdWhiteEvaluateOperator:
    {
      result=(double) (((double) pixel > value) ? QuantumRange : pixel);
      break;
    }
    case UniformNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,UniformNoise,
        value);
      break;
    }
    case XorEvaluateOperator:
    {
      result=(double) ((ssize_t) pixel ^ (ssize_t) (value+0.5));
      break;
    }
  }
  return(result);
}

/*
  Clone a canvas sized to the largest geometry in the image list, based on
  the list member with the most channels.
*/
static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception)
{
  const Image
    *p,
    *q;

  size_t
    columns,
    rows;

  q=images;
  columns=images->columns;
  rows=images->rows;
  for (p=images; p != (Image *) NULL; p=p->next)
  {
    if (p->number_channels > q->number_channels)
      q=p;
    if (p->columns > columns)
      columns=p->columns;
    if (p->rows > rows)
      rows=p->rows;
  }
  return(CloneImage(q,columns,rows,MagickTrue,exception));
}

MagickExport Image *EvaluateImages(const Image *images,
  const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag  "Evaluate/Image"

  CacheView
    *evaluate_view,
    **image_view;

  const Image
    *next;

  Image
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelChannels
    **magick_restrict evaluate_pixels;

  RandomInfo
    **magick_restrict random_info;

  size_t
    number_images;

  ssize_t
    j,
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  number_images=GetImageListLength(images);
  evaluate_pixels=AcquirePixelThreadSet(images);
  if (evaluate_pixels == (PixelChannels **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  image_view=(CacheView **) AcquireQuantumMemory(number_images,
    sizeof(*image_view));
  if (image_view == (CacheView **) NULL)
    {
      /* image is NULL after DestroyImage(), so this returns NULL */
      image=DestroyImage(image);
      evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(image);
    }
  /* one virtual cache view per source image in the list */
  next=images;
  for (j=0; j < (ssize_t) number_images; j++)
  {
    image_view[j]=AcquireVirtualCacheView(next,exception);
    next=GetNextImageInList(next);
  }
  /*
    Evaluate image pixels.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  evaluate_view=AcquireAuthenticCacheView(image,exception);
  if (op == MedianEvaluateOperator)
    {
      /*
        Median path: for every pixel, gather one record per image, sort by
        intensity and pick the middle record.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const Image
          *next;

        const int
          id = GetOpenMPThreadId();

        const Quantum
          **p;

        PixelChannels
          *evaluate_pixel;

        Quantum
          *magick_restrict q;

        ssize_t
          x;

        ssize_t
          j;

        if (status == MagickFalse)
          continue;
        p=(const Quantum **) AcquireQuantumMemory(number_images,sizeof(*p));
        if (p == (const Quantum **) NULL)
          {
            status=MagickFalse;
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              images->filename);
            continue;
          }
        for (j=0; j < (ssize_t) number_images; j++)
        {
          p[j]=GetCacheViewVirtualPixels(image_view[j],0,y,image->columns,1,
            exception);
          if (p[j] == (const Quantum *) NULL)
            break;
        }
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        /* j < number_images means a virtual pixel fetch failed above */
        if ((j < (ssize_t) number_images) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        evaluate_pixel=evaluate_pixels[id];
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          ssize_t
            i;

          next=images;
          for (j=0; j < (ssize_t) number_images; j++)
          {
            for (i=0; i < MaxPixelChannels; i++)
              evaluate_pixel[j].channel[i]=0.0;
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(next,channel);
              PixelTrait evaluate_traits = GetPixelChannelTraits(image,
                channel);
              if ((traits == UndefinedPixelTrait) ||
                  (evaluate_traits == UndefinedPixelTrait) ||
                  ((traits & UpdatePixelTrait) == 0))
                continue;
              evaluate_pixel[j].channel[i]=ApplyEvaluateOperator(
                random_info[id],GetPixelChannel(next,channel,p[j]),op,
                evaluate_pixel[j].channel[i]);
            }
            p[j]+=GetPixelChannels(next);
            next=GetNextImageInList(next);
          }
          /* pick the middle record after an intensity sort */
          qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
            IntensityCompare);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits == UndefinedPixelTrait) ||
                ((traits & UpdatePixelTrait) == 0))
              continue;
            q[i]=ClampToQuantum(evaluate_pixel[number_images/2].channel[i]);
          }
          q+=GetPixelChannels(image);
        }
        p=(const Quantum **) RelinquishMagickMemory(p);
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(images,EvaluateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  else
    {
      /*
        General path: fold every image into a per-column accumulator, then
        apply the operator's final reduction (mean, multiply, RMS).
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const Image
          *next;

        const int
          id = GetOpenMPThreadId();

        const Quantum
          **p;

        ssize_t
          i,
          x;

        PixelChannels
          *evaluate_pixel;

        Quantum
          *magick_restrict q;

        ssize_t
          j;

        if (status == MagickFalse)
          continue;
        p=(const Quantum **) AcquireQuantumMemory(number_images,sizeof(*p));
        if (p == (const Quantum **) NULL)
          {
            status=MagickFalse;
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              images->filename);
            continue;
          }
        for (j=0; j < (ssize_t) number_images; j++)
        {
          p[j]=GetCacheViewVirtualPixels(image_view[j],0,y,image->columns,1,
            exception);
          if (p[j] == (const Quantum *) NULL)
            break;
        }
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        if ((j < (ssize_t) number_images) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        evaluate_pixel=evaluate_pixels[id];
        /* reset the scanline accumulators */
        for (j=0; j < (ssize_t) image->columns; j++)
          for (i=0; i < MaxPixelChannels; i++)
            evaluate_pixel[j].channel[i]=0.0;
        next=images;
        for (j=0; j < (ssize_t) number_images; j++)
        {
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            ssize_t
              i;

            for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(next,channel);
              PixelTrait evaluate_traits = GetPixelChannelTraits(image,
                channel);
              if ((traits == UndefinedPixelTrait) ||
                  (evaluate_traits == UndefinedPixelTrait))
                continue;
              if ((traits & UpdatePixelTrait) == 0)
                continue;
              /* first image seeds the accumulator with a plain add */
              evaluate_pixel[x].channel[i]=ApplyEvaluateOperator(
                random_info[id],GetPixelChannel(next,channel,p[j]),j == 0 ?
                AddEvaluateOperator : op,evaluate_pixel[x].channel[i]);
            }
            p[j]+=GetPixelChannels(next);
          }
          next=GetNextImageInList(next);
        }
        /* final reduction for operators that accumulate */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          switch (op)
          {
            case MeanEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                evaluate_pixel[x].channel[i]/=(double) number_images;
              break;
            }
            case MultiplyEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                ssize_t
                  j;

                for (j=0; j < (ssize_t) (number_images-1); j++)
                  evaluate_pixel[x].channel[i]*=QuantumScale;
              }
              break;
            }
            case RootMeanSquareEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                evaluate_pixel[x].channel[i]=sqrt(evaluate_pixel[x].channel[i]/
                  number_images);
              break;
            }
            default:
              break;
          }
        }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits == UndefinedPixelTrait) ||
                ((traits & UpdatePixelTrait) == 0))
              continue;
            q[i]=ClampToQuantum(evaluate_pixel[x].channel[i]);
          }
          q+=GetPixelChannels(image);
        }
        p=(const Quantum **) RelinquishMagickMemory(p);
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(images,EvaluateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  for (j=0; j < (ssize_t) number_images; j++)
    image_view[j]=DestroyCacheView(image_view[j]);
  image_view=(CacheView **) RelinquishMagickMemory(image_view);
  evaluate_view=DestroyCacheView(evaluate_view);
  evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}

MagickExport MagickBooleanType EvaluateImage(Image *image,
  const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        result;

      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        result=ApplyEvaluateOperator(random_info[id],q[i],op,value);
        /* mean of the pixel and the value: the operator only summed them */
        if (op == MeanEvaluateOperator)
          result/=2.0;
        q[i]=ClampToQuantum(result);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EvaluateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     F u n c t i o n I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FunctionImage() applies a value to the image with an arithmetic, relational,
%  or logical operator to an image.  Use these operations to lighten or darken
%  an image, to increase or decrease contrast in an image, or to produce the
%  "negative" of an image.
%
%  The format of the FunctionImage method is:
%
%      MagickBooleanType FunctionImage(Image *image,
%        const MagickFunction function,const size_t number_parameters,
%        const double *parameters,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o function: A channel function.
%
%    o parameters: one or more parameters.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Evaluate one parameterized function (polynomial, sinusoid, arcsin, arctan)
  on a single quantum; missing parameters fall back to documented defaults.
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
  double
    result;

  ssize_t
    i;

  (void) exception;
  result=0.0;
  switch (function)
  {
    case PolynomialFunction:
    {
      /*
        Polynomial: polynomial constants,  highest to lowest order (e.g.
        c0*x^3+c1*x^2+c2*x+c3).  Evaluated by Horner's scheme.
      */
      result=0.0;
      for (i=0; i < (ssize_t) number_parameters; i++)
        result=result*QuantumScale*pixel+parameters[i];
      result*=QuantumRange;
      break;
    }
    case SinusoidFunction:
    {
      double
        amplitude,
        bias,
        frequency,
        phase;

      /*
        Sinusoid: frequency, phase, amplitude, bias.
      */
      frequency=(number_parameters >= 1) ? parameters[0] : 1.0;
      phase=(number_parameters >= 2) ? parameters[1] : 0.0;
      amplitude=(number_parameters >= 3) ? parameters[2] : 0.5;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=(double) (QuantumRange*(amplitude*sin((double) (2.0*
        MagickPI*(frequency*QuantumScale*pixel+phase/360.0)))+bias));
      break;
    }
    case ArcsinFunction:
    {
      double
        bias,
        center,
        range,
        width;

      /*
        Arcsin (pegged at range limits for invalid results): width, center,
        range, and bias.
      */
      width=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=2.0/width*(QuantumScale*pixel-center);
      if ( result <= -1.0 )
        result=bias-range/2.0;
      else
        if (result >= 1.0)
          result=bias+range/2.0;
        else
          result=(double) (range/MagickPI*asin((double) result)+bias);
      result*=QuantumRange;
      break;
    }
    case ArctanFunction:
    {
      double
        center,
        bias,
        range,
        slope;

      /*
        Arctan: slope, center, range, and bias.
      */
      slope=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=(double) (MagickPI*slope*(QuantumScale*pixel-center));
      result=(double) (QuantumRange*(range/MagickPI*atan((double)
        result)+bias));
      break;
    }
    case UndefinedFunction:
      break;
  }
  return(ClampToQuantum(result));
}

MagickExport MagickBooleanType FunctionImage(Image *image,
  const MagickFunction function,const size_t number_parameters,
  const double *parameters,ExceptionInfo *exception)
{
#define FunctionImageTag  "Function/Image "

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* try the OpenCL-accelerated path first */
  if (AccelerateFunctionImage(image,function,number_parameters,parameters,
        exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ApplyFunction(q[i],function,number_parameters,parameters,
          exception);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FunctionImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e E n t r o p y                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageEntropy() returns the entropy of one or more image channels.
%
%  The format of the GetImageEntropy method is:
%
%      MagickBooleanType GetImageEntropy(const Image *image,double *entropy,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o entropy: the average entropy of the selected channels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageEntropy(const Image *image,
  double *entropy,ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Delegate the heavy lifting to GetImageStatistics() and report the
    composite (all selected channels) entropy.
  */
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *entropy=statistics[CompositePixelChannel].entropy;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e E x t r e m a                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageExtrema() returns the extrema of one or more image channels.
%
%  The format of the GetImageExtrema method is:
%
%      MagickBooleanType GetImageExtrema(const Image *image,size_t *minima,
%        size_t *maxima,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o minima: the minimum value in the channel.
%
%    o maxima: the maximum value in the channel.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
  size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
  double
    max,
    min;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Round the continuous channel range to integral extrema.
  */
  status=GetImageRange(image,&min,&max,exception);
  *maxima=(size_t) floor(max+0.5);
  *minima=(size_t) ceil(min-0.5);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e K u r t o s i s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageKurtosis() returns the kurtosis and skewness of one or more image
%  channels.
%
%  The format of the GetImageKurtosis method is:
%
%      MagickBooleanType GetImageKurtosis(const Image *image,double *kurtosis,
%        double *skewness,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o kurtosis: the kurtosis of the channel.
%
%    o skewness: the skewness of the channel.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
  double *kurtosis,double *skewness,ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Report the composite-channel kurtosis and skewness computed by
    GetImageStatistics().
  */
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *kurtosis=statistics[CompositePixelChannel].kurtosis;
  *skewness=statistics[CompositePixelChannel].skewness;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e M e a n                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageMean() returns the mean and standard deviation of one or more image
%  channels.
%
%  The format of the GetImageMean method is:
%
%      MagickBooleanType GetImageMean(const Image *image,double *mean,
%        double *standard_deviation,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o mean: the average value in the channel.
%
%    o standard_deviation: the standard deviation of the channel.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
  double *standard_deviation,ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Report the composite-channel mean and standard deviation computed by
    GetImageStatistics().
  */
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *mean=statistics[CompositePixelChannel].mean;
  *standard_deviation=statistics[CompositePixelChannel].standard_deviation;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e M e d i a n                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageMedian() returns the median pixel of one or more image channels.
%
%  The format of the GetImageMedian method is:
%
%      MagickBooleanType GetImageMedian(const Image *image,double *median,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o median: the average value in the channel.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageMedian(const Image *image,double *median,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* the composite slot holds the all-selected-channels median */
  channel_statistics=GetImageStatistics(image,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *median=channel_statistics[CompositePixelChannel].median;
  channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    channel_statistics);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e M o m e n t s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageMoments() returns the normalized moments of one or more image
%  channels.
%
%  The format of the GetImageMoments method is:
%
%      ChannelMoments *GetImageMoments(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Count the channels with the update trait set; never returns 0 so callers
  may divide by the result (the closing operand continues on the next line).
*/
static size_t GetImageChannels(const Image *image)
{
  ssize_t
    i;

  size_t
    channels;

  channels=0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if (traits == UndefinedPixelTrait)
      continue;
    if ((traits & UpdatePixelTrait) == 0)
      continue;
    channels++;
  }
  return((size_t) (channels == 0 ?
1 : channels)); } MagickExport ChannelMoments *GetImageMoments(const Image *image, ExceptionInfo *exception) { #define MaxNumberImageMoments 8 CacheView *image_view; ChannelMoments *channel_moments; double M00[MaxPixelChannels+1], M01[MaxPixelChannels+1], M02[MaxPixelChannels+1], M03[MaxPixelChannels+1], M10[MaxPixelChannels+1], M11[MaxPixelChannels+1], M12[MaxPixelChannels+1], M20[MaxPixelChannels+1], M21[MaxPixelChannels+1], M22[MaxPixelChannels+1], M30[MaxPixelChannels+1]; PointInfo centroid[MaxPixelChannels+1]; ssize_t channel, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); channel_moments=(ChannelMoments *) AcquireQuantumMemory(MaxPixelChannels+1, sizeof(*channel_moments)); if (channel_moments == (ChannelMoments *) NULL) return(channel_moments); (void) memset(channel_moments,0,(MaxPixelChannels+1)* sizeof(*channel_moments)); (void) memset(centroid,0,sizeof(centroid)); (void) memset(M00,0,sizeof(M00)); (void) memset(M01,0,sizeof(M01)); (void) memset(M02,0,sizeof(M02)); (void) memset(M03,0,sizeof(M03)); (void) memset(M10,0,sizeof(M10)); (void) memset(M11,0,sizeof(M11)); (void) memset(M12,0,sizeof(M12)); (void) memset(M20,0,sizeof(M20)); (void) memset(M21,0,sizeof(M21)); (void) memset(M22,0,sizeof(M22)); (void) memset(M30,0,sizeof(M30)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; /* Compute center of mass (centroid). 
*/ p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; M00[channel]+=QuantumScale*p[i]; M00[MaxPixelChannels]+=QuantumScale*p[i]; M10[channel]+=x*QuantumScale*p[i]; M10[MaxPixelChannels]+=x*QuantumScale*p[i]; M01[channel]+=y*QuantumScale*p[i]; M01[MaxPixelChannels]+=y*QuantumScale*p[i]; } p+=GetPixelChannels(image); } } for (channel=0; channel <= MaxPixelChannels; channel++) { /* Compute center of mass (centroid). */ centroid[channel].x=M10[channel]*PerceptibleReciprocal(M00[channel]); centroid[channel].y=M01[channel]*PerceptibleReciprocal(M00[channel]); } for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; /* Compute the image moments. 
*/ p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; M11[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)* QuantumScale*p[i]; M11[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)* QuantumScale*p[i]; M20[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)* QuantumScale*p[i]; M20[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)* QuantumScale*p[i]; M02[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)* QuantumScale*p[i]; M02[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)* QuantumScale*p[i]; M21[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (y-centroid[channel].y)*QuantumScale*p[i]; M21[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (y-centroid[channel].y)*QuantumScale*p[i]; M12[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)* (y-centroid[channel].y)*QuantumScale*p[i]; M12[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)* (y-centroid[channel].y)*QuantumScale*p[i]; M22[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i]; M22[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i]; M30[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (x-centroid[channel].x)*QuantumScale*p[i]; M30[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (x-centroid[channel].x)*QuantumScale*p[i]; M03[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)* (y-centroid[channel].y)*QuantumScale*p[i]; 
M03[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)* (y-centroid[channel].y)*QuantumScale*p[i]; } p+=GetPixelChannels(image); } } M00[MaxPixelChannels]/=GetImageChannels(image); M01[MaxPixelChannels]/=GetImageChannels(image); M02[MaxPixelChannels]/=GetImageChannels(image); M03[MaxPixelChannels]/=GetImageChannels(image); M10[MaxPixelChannels]/=GetImageChannels(image); M11[MaxPixelChannels]/=GetImageChannels(image); M12[MaxPixelChannels]/=GetImageChannels(image); M20[MaxPixelChannels]/=GetImageChannels(image); M21[MaxPixelChannels]/=GetImageChannels(image); M22[MaxPixelChannels]/=GetImageChannels(image); M30[MaxPixelChannels]/=GetImageChannels(image); for (channel=0; channel <= MaxPixelChannels; channel++) { /* Compute elliptical angle, major and minor axes, eccentricity, & intensity. */ channel_moments[channel].centroid=centroid[channel]; channel_moments[channel].ellipse_axis.x=sqrt((2.0* PerceptibleReciprocal(M00[channel]))*((M20[channel]+M02[channel])+ sqrt(4.0*M11[channel]*M11[channel]+(M20[channel]-M02[channel])* (M20[channel]-M02[channel])))); channel_moments[channel].ellipse_axis.y=sqrt((2.0* PerceptibleReciprocal(M00[channel]))*((M20[channel]+M02[channel])- sqrt(4.0*M11[channel]*M11[channel]+(M20[channel]-M02[channel])* (M20[channel]-M02[channel])))); channel_moments[channel].ellipse_angle=RadiansToDegrees(1.0/2.0*atan(2.0* M11[channel]*PerceptibleReciprocal(M20[channel]-M02[channel]))); if (fabs(M11[channel]) < 0.0) { if ((fabs(M20[channel]-M02[channel]) >= 0.0) && ((M20[channel]-M02[channel]) < 0.0)) channel_moments[channel].ellipse_angle+=90.0; } else if (M11[channel] < 0.0) { if (fabs(M20[channel]-M02[channel]) >= 0.0) { if ((M20[channel]-M02[channel]) < 0.0) channel_moments[channel].ellipse_angle+=90.0; else channel_moments[channel].ellipse_angle+=180.0; } } else if ((fabs(M20[channel]-M02[channel]) >= 0.0) && ((M20[channel]-M02[channel]) < 0.0)) channel_moments[channel].ellipse_angle+=90.0; 
channel_moments[channel].ellipse_eccentricity=sqrt(1.0-( channel_moments[channel].ellipse_axis.y* channel_moments[channel].ellipse_axis.y*PerceptibleReciprocal( channel_moments[channel].ellipse_axis.x* channel_moments[channel].ellipse_axis.x))); channel_moments[channel].ellipse_intensity=M00[channel]* PerceptibleReciprocal(MagickPI*channel_moments[channel].ellipse_axis.x* channel_moments[channel].ellipse_axis.y+MagickEpsilon); } for (channel=0; channel <= MaxPixelChannels; channel++) { /* Normalize image moments. */ M10[channel]=0.0; M01[channel]=0.0; M11[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(1.0+1.0)/2.0)); M20[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(2.0+0.0)/2.0)); M02[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(0.0+2.0)/2.0)); M21[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(2.0+1.0)/2.0)); M12[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(1.0+2.0)/2.0)); M22[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(2.0+2.0)/2.0)); M30[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(3.0+0.0)/2.0)); M03[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(0.0+3.0)/2.0)); M00[channel]=1.0; } image_view=DestroyCacheView(image_view); for (channel=0; channel <= MaxPixelChannels; channel++) { /* Compute Hu invariant moments. 
*/ channel_moments[channel].invariant[0]=M20[channel]+M02[channel]; channel_moments[channel].invariant[1]=(M20[channel]-M02[channel])* (M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel]; channel_moments[channel].invariant[2]=(M30[channel]-3.0*M12[channel])* (M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])* (3.0*M21[channel]-M03[channel]); channel_moments[channel].invariant[3]=(M30[channel]+M12[channel])* (M30[channel]+M12[channel])+(M21[channel]+M03[channel])* (M21[channel]+M03[channel]); channel_moments[channel].invariant[4]=(M30[channel]-3.0*M12[channel])* (M30[channel]+M12[channel])*((M30[channel]+M12[channel])* (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])* (M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])* (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])* (M30[channel]+M12[channel])-(M21[channel]+M03[channel])* (M21[channel]+M03[channel])); channel_moments[channel].invariant[5]=(M20[channel]-M02[channel])* ((M30[channel]+M12[channel])*(M30[channel]+M12[channel])- (M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+ 4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]); channel_moments[channel].invariant[6]=(3.0*M21[channel]-M03[channel])* (M30[channel]+M12[channel])*((M30[channel]+M12[channel])* (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])* (M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])* (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])* (M30[channel]+M12[channel])-(M21[channel]+M03[channel])* (M21[channel]+M03[channel])); channel_moments[channel].invariant[7]=M11[channel]*((M30[channel]+ M12[channel])*(M30[channel]+M12[channel])-(M03[channel]+M21[channel])* (M03[channel]+M21[channel]))-(M20[channel]-M02[channel])* (M30[channel]+M12[channel])*(M03[channel]+M21[channel]); } if (y < (ssize_t) image->rows) channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments); return(channel_moments); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l P e r c e p t u a l H a s h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePerceptualHash() returns the perceptual hash of one or more % image channels. % % The format of the GetImagePerceptualHash method is: % % ChannelPerceptualHash *GetImagePerceptualHash(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickLog10(const double x) { #define Log10Epsilon (1.0e-11) if (fabs(x) < Log10Epsilon) return(log10(Log10Epsilon)); return(log10(fabs(x))); } MagickExport ChannelPerceptualHash *GetImagePerceptualHash(const Image *image, ExceptionInfo *exception) { ChannelPerceptualHash *perceptual_hash; char *colorspaces, *p, *q; const char *artifact; MagickBooleanType status; ssize_t i; perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory( MaxPixelChannels+1UL,sizeof(*perceptual_hash)); if (perceptual_hash == (ChannelPerceptualHash *) NULL) return((ChannelPerceptualHash *) NULL); artifact=GetImageArtifact(image,"phash:colorspaces"); if (artifact != NULL) colorspaces=AcquireString(artifact); else colorspaces=AcquireString("sRGB,HCLp"); perceptual_hash[0].number_colorspaces=0; perceptual_hash[0].number_channels=0; q=colorspaces; for (i=0; (p=StringToken(",",&q)) != (char *) NULL; i++) { ChannelMoments *moments; Image *hash_image; size_t j; ssize_t channel, colorspace; if (i >= MaximumNumberOfPerceptualColorspaces) break; colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,p); if (colorspace < 0) break; perceptual_hash[0].colorspace[i]=(ColorspaceType) colorspace; hash_image=BlurImage(image,0.0,1.0,exception); if (hash_image == (Image *) NULL) break; hash_image->depth=8; 
status=TransformImageColorspace(hash_image,(ColorspaceType) colorspace, exception); if (status == MagickFalse) break; moments=GetImageMoments(hash_image,exception); perceptual_hash[0].number_colorspaces++; perceptual_hash[0].number_channels+=GetImageChannels(hash_image); hash_image=DestroyImage(hash_image); if (moments == (ChannelMoments *) NULL) break; for (channel=0; channel <= MaxPixelChannels; channel++) for (j=0; j < MaximumNumberOfImageMoments; j++) perceptual_hash[channel].phash[i][j]= (-MagickLog10(moments[channel].invariant[j])); moments=(ChannelMoments *) RelinquishMagickMemory(moments); } colorspaces=DestroyString(colorspaces); return(perceptual_hash); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e R a n g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageRange() returns the range of one or more image channels. % % The format of the GetImageRange method is: % % MagickBooleanType GetImageRange(const Image *image,double *minima, % double *maxima,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o minima: the minimum value in the channel. % % o maxima: the maximum value in the channel. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,double *minima,
  double *maxima,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    initialize,  /* true until the first row result is merged */
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  initialize=MagickTrue;
  *maxima=0.0;
  *minima=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status,initialize) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    /*
      Each thread reduces its row into row_minima/row_maxima; rows are then
      merged into the global extrema inside the critical section below.
    */
    double
      row_maxima = 0.0,
      row_minima = 0.0;

    MagickBooleanType
      row_initialize;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    row_initialize=MagickTrue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        /* only updatable channels participate in the range */
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (row_initialize != MagickFalse)
          {
            /* seed the row extrema from the first eligible sample */
            row_minima=(double) p[i];
            row_maxima=(double) p[i];
            row_initialize=MagickFalse;
          }
        else
          {
            if ((double) p[i] < row_minima)
              row_minima=(double) p[i];
            if ((double) p[i] > row_maxima)
              row_maxima=(double) p[i];
          }
      }
      p+=GetPixelChannels(image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetImageRange)
#endif
    {
      /*
        Merge this row's extrema into the shared result.
        NOTE(review): if a row has no eligible samples, row_minima/row_maxima
        remain 0.0 and are still merged here — presumably acceptable because
        real images always have at least one updatable channel; confirm.
      */
      if (initialize != MagickFalse)
        {
          *minima=row_minima;
          *maxima=row_maxima;
          initialize=MagickFalse;
        }
      else
        {
          if (row_minima < *minima)
            *minima=row_minima;
          if (row_maxima > *maxima)
            *maxima=row_maxima;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e S t a t i s t i c s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageStatistics() returns statistics for each channel in the image. The % statistics include the channel depth, its minima, maxima, mean, standard % deviation, kurtosis and skewness. You can access the red channel mean, for % example, like this: % % channel_statistics=GetImageStatistics(image,exception); % red_mean=channel_statistics[RedPixelChannel].mean; % % Use MagickRelinquishMemory() to free the statistics buffer. % % The format of the GetImageStatistics method is: % % ChannelStatistics *GetImageStatistics(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ static ssize_t GetMedianPixel(Quantum *pixels,const size_t n) { #define SwapPixels(alpha,beta) \ { \ Quantum gamma=(alpha); \ (alpha)=(beta);(beta)=gamma; \ } ssize_t low = 0, high = (ssize_t) n-1, median = (low+high)/2; for ( ; ; ) { ssize_t l = low+1, h = high, mid = (low+high)/2; if (high <= low) return(median); if (high == (low+1)) { if (pixels[low] > pixels[high]) SwapPixels(pixels[low],pixels[high]); return(median); } if (pixels[mid] > pixels[high]) SwapPixels(pixels[mid],pixels[high]); if (pixels[low] > pixels[high]) SwapPixels(pixels[low], pixels[high]); if (pixels[mid] > pixels[low]) SwapPixels(pixels[mid],pixels[low]); SwapPixels(pixels[mid],pixels[low+1]); for ( ; ; ) { do l++; while (pixels[low] > pixels[l]); do h--; while (pixels[h] > pixels[low]); if (h < l) break; SwapPixels(pixels[l],pixels[h]); } SwapPixels(pixels[low],pixels[h]); if (h <= median) low=l; if (h >= median) high=h-1; } } MagickExport ChannelStatistics *GetImageStatistics(const Image *image, ExceptionInfo *exception) { ChannelStatistics *channel_statistics; double 
area, *histogram, standard_deviation; MagickStatusType status; MemoryInfo *median_info; Quantum *median; QuantumAny range; ssize_t i; size_t depth; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)* sizeof(*histogram)); channel_statistics=(ChannelStatistics *) AcquireQuantumMemory( MaxPixelChannels+1,sizeof(*channel_statistics)); if ((channel_statistics == (ChannelStatistics *) NULL) || (histogram == (double *) NULL)) { if (histogram != (double *) NULL) histogram=(double *) RelinquishMagickMemory(histogram); if (channel_statistics != (ChannelStatistics *) NULL) channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(channel_statistics); } (void) memset(channel_statistics,0,(MaxPixelChannels+1)* sizeof(*channel_statistics)); for (i=0; i <= (ssize_t) MaxPixelChannels; i++) { channel_statistics[i].depth=1; channel_statistics[i].maxima=(-MagickMaximumValue); channel_statistics[i].minima=MagickMaximumValue; } (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*histogram)); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; /* Compute pixel statistics. 
*/ p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; if (GetPixelReadMask(image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; if (channel_statistics[channel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[channel].depth; range=GetQuantumRange(depth); status=p[i] != ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range), range) ? MagickTrue : MagickFalse; if (status != MagickFalse) { channel_statistics[channel].depth++; if (channel_statistics[channel].depth > channel_statistics[CompositePixelChannel].depth) channel_statistics[CompositePixelChannel].depth= channel_statistics[channel].depth; i--; continue; } } if ((double) p[i] < channel_statistics[channel].minima) channel_statistics[channel].minima=(double) p[i]; if ((double) p[i] > channel_statistics[channel].maxima) channel_statistics[channel].maxima=(double) p[i]; channel_statistics[channel].sum+=p[i]; channel_statistics[channel].sum_squared+=(double) p[i]*p[i]; channel_statistics[channel].sum_cubed+=(double) p[i]*p[i]*p[i]; channel_statistics[channel].sum_fourth_power+=(double) p[i]*p[i]*p[i]* p[i]; channel_statistics[channel].area++; if ((double) p[i] < channel_statistics[CompositePixelChannel].minima) channel_statistics[CompositePixelChannel].minima=(double) p[i]; if ((double) p[i] > channel_statistics[CompositePixelChannel].maxima) channel_statistics[CompositePixelChannel].maxima=(double) p[i]; histogram[GetPixelChannels(image)*ScaleQuantumToMap( ClampToQuantum((double) p[i]))+i]++; channel_statistics[CompositePixelChannel].sum+=(double) p[i]; channel_statistics[CompositePixelChannel].sum_squared+=(double) p[i]*p[i]; 
channel_statistics[CompositePixelChannel].sum_cubed+=(double) p[i]*p[i]*p[i]; channel_statistics[CompositePixelChannel].sum_fourth_power+=(double) p[i]*p[i]*p[i]*p[i]; channel_statistics[CompositePixelChannel].area++; } p+=GetPixelChannels(image); } } for (i=0; i <= (ssize_t) MaxPixelChannels; i++) { /* Normalize pixel statistics. */ area=PerceptibleReciprocal(channel_statistics[i].area); channel_statistics[i].sum*=area; channel_statistics[i].sum_squared*=area; channel_statistics[i].sum_cubed*=area; channel_statistics[i].sum_fourth_power*=area; channel_statistics[i].mean=channel_statistics[i].sum; channel_statistics[i].variance=channel_statistics[i].sum_squared; standard_deviation=sqrt(channel_statistics[i].variance- (channel_statistics[i].mean*channel_statistics[i].mean)); standard_deviation=sqrt(PerceptibleReciprocal(channel_statistics[i].area- 1.0)*channel_statistics[i].area*standard_deviation*standard_deviation); channel_statistics[i].standard_deviation=standard_deviation; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double number_bins; ssize_t j; /* Compute pixel entropy. */ PixelChannel channel = GetPixelChannelChannel(image,i); number_bins=0.0; for (j=0; j <= (ssize_t) MaxMap; j++) if (histogram[GetPixelChannels(image)*j+i] > 0.0) number_bins++; area=PerceptibleReciprocal(channel_statistics[channel].area); for (j=0; j <= (ssize_t) MaxMap; j++) { double count; count=area*histogram[GetPixelChannels(image)*j+i]; channel_statistics[channel].entropy+=-count*MagickLog10(count)* PerceptibleReciprocal(MagickLog10(number_bins)); channel_statistics[CompositePixelChannel].entropy+=-count* MagickLog10(count)*PerceptibleReciprocal(MagickLog10(number_bins))/ GetPixelChannels(image); } } histogram=(double *) RelinquishMagickMemory(histogram); for (i=0; i <= (ssize_t) MaxPixelChannels; i++) { /* Compute kurtosis & skewness statistics. 
*/ standard_deviation=PerceptibleReciprocal( channel_statistics[i].standard_deviation); channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0* channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0* channel_statistics[i].mean*channel_statistics[i].mean* channel_statistics[i].mean)*(standard_deviation*standard_deviation* standard_deviation); channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0* channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0* channel_statistics[i].mean*channel_statistics[i].mean* channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean* channel_statistics[i].mean*1.0*channel_statistics[i].mean* channel_statistics[i].mean)*(standard_deviation*standard_deviation* standard_deviation*standard_deviation)-3.0; } median_info=AcquireVirtualMemory(image->columns,image->rows*sizeof(*median)); if (median_info == (MemoryInfo *) NULL) (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); else { ssize_t i; median=(Quantum *) GetVirtualMemoryBlob(median_info); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { size_t n = 0; /* Compute median statistics for each channel. 
*/ PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelReadMask(image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(image); continue; } median[n++]=p[i]; } p+=GetPixelChannels(image); } channel_statistics[channel].median=(double) median[ GetMedianPixel(median,n)]; } median_info=RelinquishVirtualMemory(median_info); } channel_statistics[CompositePixelChannel].mean=0.0; channel_statistics[CompositePixelChannel].median=0.0; channel_statistics[CompositePixelChannel].standard_deviation=0.0; channel_statistics[CompositePixelChannel].entropy=0.0; for (i=0; i < (ssize_t) MaxPixelChannels; i++) { channel_statistics[CompositePixelChannel].mean+= channel_statistics[i].mean; channel_statistics[CompositePixelChannel].median+= channel_statistics[i].median; channel_statistics[CompositePixelChannel].standard_deviation+= channel_statistics[i].standard_deviation; channel_statistics[CompositePixelChannel].entropy+= channel_statistics[i].entropy; } channel_statistics[CompositePixelChannel].mean/=(double) GetImageChannels(image); channel_statistics[CompositePixelChannel].median/=(double) GetImageChannels(image); channel_statistics[CompositePixelChannel].standard_deviation/=(double) GetImageChannels(image); channel_statistics[CompositePixelChannel].entropy/=(double) GetImageChannels(image); if (y < (ssize_t) image->rows) channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(channel_statistics); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o l y n o m i a l I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PolynomialImage() returns a new image where each pixel is the sum of the
%  pixels in the image sequence after applying its corresponding terms
%  (coefficient and degree pairs).
%
%  The format of the PolynomialImage method is:
%
%      Image *PolynomialImage(const Image *images,const size_t number_terms,
%        const double *terms,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o number_terms: the number of terms in the list.  The actual list length
%      is 2 x number_terms + 1 (the constant).
%
%    o terms: the list of polynomial coefficients and degree pairs and a
%      constant.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolynomialImage(const Image *images,
  const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
#define PolynomialImageTag  "Polynomial/Image"

  CacheView
    *polynomial_view;

  Image
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelChannels
    **magick_restrict polynomial_pixels;  /* per-thread accumulator rows */

  size_t
    number_images;

  ssize_t
    y;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  number_images=GetImageListLength(images);
  polynomial_pixels=AcquirePixelThreadSet(images);
  if (polynomial_pixels == (PixelChannels **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Polynomial image pixels.
  */
  status=MagickTrue;
  progress=0;
  polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    const int
      id = GetOpenMPThreadId();

    ssize_t
      i,
      x;

    PixelChannels
      *polynomial_pixel;

    Quantum
      *magick_restrict q;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* zero this thread's accumulator row before summing the sequence */
    polynomial_pixel=polynomial_pixels[id];
    for (j=0; j < (ssize_t) image->columns; j++)
      for (i=0; i < MaxPixelChannels; i++)
        polynomial_pixel[j].channel[i]=0.0;
    next=images;
    for (j=0; j < (ssize_t) number_images; j++)
    {
      const Quantum
        *p;

      /* images beyond the supplied term list contribute nothing */
      if (j >= (ssize_t) number_terms)
        continue;
      image_view=AcquireVirtualCacheView(next,exception);
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          image_view=DestroyCacheView(image_view);
          break;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        ssize_t
          i;

        for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
        {
          MagickRealType
            coefficient,
            degree;

          PixelChannel channel = GetPixelChannelChannel(image,i);
          PixelTrait traits = GetPixelChannelTraits(next,channel);
          PixelTrait polynomial_traits=GetPixelChannelTraits(image,channel);
          if ((traits == UndefinedPixelTrait) ||
              (polynomial_traits == UndefinedPixelTrait))
            continue;
          if ((traits & UpdatePixelTrait) == 0)
            continue;
          /* terms[] holds (coefficient, degree) pairs for image j */
          coefficient=(MagickRealType) terms[2*j];
          degree=(MagickRealType) terms[(j << 1)+1];
          /*
            NOTE(review): p addresses next's pixels but is indexed through
            the canvas image's channel map — presumably safe because the
            canvas shares the sequence's channel layout; confirm.
          */
          polynomial_pixel[x].channel[i]+=coefficient*
            pow(QuantumScale*GetPixelChannel(image,channel,p),degree);
        }
        p+=GetPixelChannels(next);
      }
      image_view=DestroyCacheView(image_view);
      next=GetNextImageInList(next);
    }
    /* clamp the accumulated sums back into quantum range */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumRange*polynomial_pixel[x].channel[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(images,PolynomialImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  polynomial_view=DestroyCacheView(polynomial_view);
  polynomial_pixels=DestroyPixelThreadSet(images,polynomial_pixels);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S t a t i s t i c I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  StatisticImage() makes each pixel the min / max / median / mode / etc. of
%  the neighborhood of the specified width and height.
%
%  The format of the StatisticImage method is:
%
%      Image *StatisticImage(const Image *image,const StatisticType type,
%        const size_t width,const size_t height,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: the statistic type (median, mode, etc.).
%
%    o width: the width of the pixel neighborhood.
%
%    o height: the height of the pixel neighborhood.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  A skip-list over 16-bit pixel values used to extract order statistics
  (median, mode, nonpeak) of a neighborhood without sorting.  Nodes are
  preallocated, one per possible value; node 65536 is the sentinel/root.
*/
typedef struct _SkipNode
{
  size_t
    next[9],    /* forward links, one per level */
    count,      /* occurrences of this value in the current neighborhood */
    signature;  /* matches the list signature when the node is live */
} SkipNode;

typedef struct _SkipList
{
  ssize_t
    level;

  SkipNode
    *nodes;
} SkipList;

typedef struct _PixelList
{
  size_t
    length,  /* number of samples in a full neighborhood (width*height) */
    seed;    /* PRNG state for node-level selection */

  SkipList
    skip_list;

  size_t
    signature;
} PixelList;

static PixelList *DestroyPixelList(PixelList *pixel_list)
{
  if (pixel_list == (PixelList *) NULL)
    return((PixelList *) NULL);
  if (pixel_list->skip_list.nodes != (SkipNode *) NULL)
    pixel_list->skip_list.nodes=(SkipNode *) RelinquishAlignedMemory(
      pixel_list->skip_list.nodes);
  pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list);
  return(pixel_list);
}

static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
  ssize_t
    i;

  assert(pixel_list != (PixelList **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixel_list[i] != (PixelList *) NULL)
      pixel_list[i]=DestroyPixelList(pixel_list[i]);
  pixel_list=(PixelList **) RelinquishMagickMemory(pixel_list);
  return(pixel_list);
}

static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
  PixelList
    *pixel_list;

  pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
  if (pixel_list == (PixelList *) NULL)
    return(pixel_list);
  (void) memset((void *) pixel_list,0,sizeof(*pixel_list));
  pixel_list->length=width*height;
  /* 65536 value nodes plus the sentinel node */
  pixel_list->skip_list.nodes=(SkipNode *) AcquireAlignedMemory(65537UL,
    sizeof(*pixel_list->skip_list.nodes));
  if (pixel_list->skip_list.nodes == (SkipNode *) NULL)
    return(DestroyPixelList(pixel_list));
  (void) memset(pixel_list->skip_list.nodes,0,65537UL*
    sizeof(*pixel_list->skip_list.nodes));
  pixel_list->signature=MagickCoreSignature;
  return(pixel_list);
}

static PixelList **AcquirePixelListThreadSet(const size_t width,
  const size_t height)
{
  PixelList
    **pixel_list;

  ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_list));
  if (pixel_list == (PixelList **) NULL)
    return((PixelList **) NULL);
  (void) memset(pixel_list,0,number_threads*sizeof(*pixel_list));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_list[i]=AcquirePixelList(width,height);
    if (pixel_list[i] == (PixelList *) NULL)
      return(DestroyPixelListThreadSet(pixel_list));
  }
  return(pixel_list);
}

static void AddNodePixelList(PixelList *pixel_list,const size_t color)
{
  SkipList
    *p;

  ssize_t
    level;

  size_t
    search,
    update[9];

  /*
    Initialize the node.
  */
  p=(&pixel_list->skip_list);
  p->nodes[color].signature=pixel_list->signature;
  p->nodes[color].count=1;
  /*
    Determine where it belongs in the list.
  */
  search=65536UL;
  for (level=p->level; level >= 0; level--)
  {
    while (p->nodes[search].next[level] < color)
      search=p->nodes[search].next[level];
    update[level]=search;
  }
  /*
    Generate a pseudo-random level for this node.
  */
  for (level=0; ; level++)
  {
    pixel_list->seed=(pixel_list->seed*42893621L)+1L;
    if ((pixel_list->seed & 0x300) != 0x300)
      break;
  }
  if (level > 8)
    level=8;
  if (level > (p->level+2))
    level=p->level+2;
  /*
    If we're raising the list's level, link back to the root node.
  */
  while (level > p->level)
  {
    p->level++;
    update[p->level]=65536UL;
  }
  /*
    Link the node into the skip-list.
  */
  do
  {
    p->nodes[color].next[level]=p->nodes[update[level]].next[level];
    p->nodes[update[level]].next[level]=color;
  } while (level-- > 0);
}

static inline void GetMedianPixelList(PixelList *pixel_list,Quantum *pixel)
{
  SkipList
    *p;

  size_t
    color;

  ssize_t
    count;

  /*
    Find the median value for each of the color.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  count=0;
  /* walk the bottom level until half the samples have been passed */
  do
  {
    color=p->nodes[color].next[0];
    count+=p->nodes[color].count;
  } while (count <= (ssize_t) (pixel_list->length >> 1));
  *pixel=ScaleShortToQuantum((unsigned short) color);
}

static inline void GetModePixelList(PixelList *pixel_list,Quantum *pixel)
{
  SkipList
    *p;

  size_t
    color,
    max_count,
    mode;

  ssize_t
    count;

  /*
    Make each pixel the 'predominant color' of the specified neighborhood.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  mode=color;
  max_count=p->nodes[mode].count;
  count=0;
  /* scan the full bottom level, tracking the most frequent value */
  do
  {
    color=p->nodes[color].next[0];
    if (p->nodes[color].count > max_count)
      {
        mode=color;
        max_count=p->nodes[mode].count;
      }
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  *pixel=ScaleShortToQuantum((unsigned short) mode);
}

static inline void GetNonpeakPixelList(PixelList *pixel_list,Quantum *pixel)
{
  SkipList
    *p;

  size_t
    color,
    next,
    previous;

  ssize_t
    count;

  /*
    Finds the non peak value for each of the colors.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  next=p->nodes[color].next[0];
  count=0;
  /* locate the median, remembering its neighbors */
  do
  {
    previous=color;
    color=next;
    next=p->nodes[color].next[0];
    count+=p->nodes[color].count;
  } while (count <= (ssize_t) (pixel_list->length >> 1));
  /* prefer a neighbor of the median when one exists (65536 == sentinel) */
  if ((previous == 65536UL) && (next != 65536UL))
    color=next;
  else
    if ((previous != 65536UL) && (next == 65536UL))
      color=previous;
  *pixel=ScaleShortToQuantum((unsigned short) color);
}

static inline void InsertPixelList(const Quantum pixel,PixelList *pixel_list)
{
  size_t
    signature;

  unsigned short
    index;

  index=ScaleQuantumToShort(pixel);
  signature=pixel_list->skip_list.nodes[index].signature;
  /* a live node (current signature) just gets its count bumped */
  if (signature == pixel_list->signature)
    {
      pixel_list->skip_list.nodes[index].count++;
      return;
    }
  AddNodePixelList(pixel_list,index);
}

static void ResetPixelList(PixelList *pixel_list)
{
  int
    level;

  SkipNode
    *root;

  SkipList
    *p;

  /*
    Reset the skip-list.
  */
  p=(&pixel_list->skip_list);
  root=p->nodes+65536UL;
  p->level=0;
  for (level=0; level < 9; level++)
    root->next[level]=65536UL;
  /* bumping the signature lazily invalidates all nodes without clearing */
  pixel_list->seed=pixel_list->signature++;
}

MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
  const size_t width,const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag  "Statistic/Image"

  CacheView
    *image_view,
    *statistic_view;

  Image
    *statistic_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelList
    **magick_restrict pixel_list;  /* one skip-list per worker thread */

  ssize_t
    center,
    y;

  /*
    Initialize statistics image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  statistic_image=CloneImage(image,0,0,MagickTrue,
    exception);
  if (statistic_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(statistic_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      statistic_image=DestroyImage(statistic_image);
      return((Image *) NULL);
    }
  pixel_list=AcquirePixelListThreadSet(MagickMax(width,1),MagickMax(height,1));
  if (pixel_list == (PixelList **) NULL)
    {
      statistic_image=DestroyImage(statistic_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Make each pixel the min / max / median / mode / etc. of the neighborhood.
  */
  /* offset of the neighborhood's center pixel within the virtual window */
  center=(ssize_t) GetPixelChannels(image)*(image->columns+MagickMax(width,1))*
    (MagickMax(height,1)/2L)+GetPixelChannels(image)*(MagickMax(width,1)/2L);
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  statistic_view=AcquireAuthenticCacheView(statistic_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,statistic_image,statistic_image->rows,1)
#endif
  for (y=0; y < (ssize_t) statistic_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* read the row plus the surrounding neighborhood margin */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) MagickMax(width,1)/2L),y-
      (ssize_t) (MagickMax(height,1)/2L),image->columns+MagickMax(width,1),
      MagickMax(height,1),exception);
    q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) statistic_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          area,
          maximum,
          minimum,
          sum,
          sum_squared;

        Quantum
          pixel;

        const Quantum
          *magick_restrict pixels;

        ssize_t
          u;

        ssize_t
          v;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait statistic_traits=GetPixelChannelTraits(statistic_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (statistic_traits == UndefinedPixelTrait))
          continue;
        /* copy-only channels and write-masked pixels pass through unchanged */
        if (((statistic_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) <= (QuantumRange/2)))
          {
            SetPixelChannel(statistic_image,channel,p[center+i],q);
            continue;
          }
        if ((statistic_traits & UpdatePixelTrait) == 0)
          continue;
        pixels=p;
        area=0.0;
        minimum=pixels[i];
        maximum=pixels[i];
        sum=0.0;
        sum_squared=0.0;
        ResetPixelList(pixel_list[id]);
        for (v=0; v < (ssize_t) MagickMax(height,1); v++)
        {
          for (u=0; u < (ssize_t) MagickMax(width,1); u++)
          {
            /*
              Order statistics go through the skip-list; the rest use
              running min/max/sum accumulators.
            */
            if ((type == MedianStatistic) || (type == ModeStatistic) ||
                (type == NonpeakStatistic))
              {
                InsertPixelList(pixels[i],pixel_list[id]);
                pixels+=GetPixelChannels(image);
                continue;
              }
            area++;
            if (pixels[i] < minimum)
              minimum=(double) pixels[i];
            if (pixels[i] > maximum)
              maximum=(double) pixels[i];
            sum+=(double) pixels[i];
            sum_squared+=(double) pixels[i]*pixels[i];
            pixels+=GetPixelChannels(image);
          }
          /* hop to the same window column on the next virtual row */
          pixels+=GetPixelChannels(image)*image->columns;
        }
        switch (type)
        {
          case GradientStatistic:
          {
            pixel=ClampToQuantum(MagickAbsoluteValue(maximum-minimum));
            break;
          }
          case MaximumStatistic:
          {
            pixel=ClampToQuantum(maximum);
            break;
          }
          case MeanStatistic:
          default:
          {
            pixel=ClampToQuantum(sum/area);
            break;
          }
          case MedianStatistic:
          {
            GetMedianPixelList(pixel_list[id],&pixel);
            break;
          }
          case MinimumStatistic:
          {
            pixel=ClampToQuantum(minimum);
            break;
          }
          case ModeStatistic:
          {
            GetModePixelList(pixel_list[id],&pixel);
            break;
          }
          case NonpeakStatistic:
          {
            GetNonpeakPixelList(pixel_list[id],&pixel);
            break;
          }
          case RootMeanSquareStatistic:
          {
pixel=ClampToQuantum(sqrt(sum_squared/area)); break; } case StandardDeviationStatistic: { pixel=ClampToQuantum(sqrt(sum_squared/area-(sum/area*sum/area))); break; } } SetPixelChannel(statistic_image,channel,pixel,q); } p+=GetPixelChannels(image); q+=GetPixelChannels(statistic_image); } if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,StatisticImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } statistic_view=DestroyCacheView(statistic_view); image_view=DestroyCacheView(image_view); pixel_list=DestroyPixelListThreadSet(pixel_list); if (status == MagickFalse) statistic_image=DestroyImage(statistic_image); return(statistic_image); }
main.c
//
// Created by sergio on 6/05/18.
//
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <memory.h>
#include <math.h>
#include <omp.h>

#define ALL_PULSOS 800
#define PULSOS 100
#define GATE_MAX 500 /* 2 x 250 */
#define GRADOS 8

/* One I/Q sample: in-phase (phas) and quadrature (quad) components. */
struct complex {
    float phas;
    float quad;
};

int process(const uint16_t *valid_samples, const int *pos_pulso, const int *a_gates,
            double pulsos_v_gate[ALL_PULSOS][GATE_MAX],
            double pulsos_h_gate[ALL_PULSOS][GATE_MAX],
            const char *buffer);

int correlacion(double autocorr_v[GRADOS][GATE_MAX], double autocorr_h[GRADOS][GATE_MAX],
                double pulsos_v_gate[ALL_PULSOS][GATE_MAX],
                double pulsos_h_gate[ALL_PULSOS][GATE_MAX]);

/*
 * Reads ./pulsos.iq (a stream of [valid_samples:uint16][V samples][H samples]
 * pulse records), builds pulse-by-gate magnitude matrices, computes per-degree
 * lag-1 autocorrelations, and writes the result to ./proccess.outln.
 *
 * argv[1] (optional): number of OpenMP threads, default 1.
 */
int main(int argc, char *argv[])
{
    int file_size, ptr_buffer = 0;
    char *buffer;                       /* whole input file in memory */
    int a_gates[ALL_PULSOS],            /* tentative samples-per-gate for each pulse */
        pos_pulso[ALL_PULSOS];          /* byte offset of each pulse's sample data */
    int grado_aux = 82;                 /* degree label written before each record */
    int thilo = 0;
    double pulsos_v_gate[ALL_PULSOS][GATE_MAX],
           pulsos_h_gate[ALL_PULSOS][GATE_MAX];
    double autocorr_v[GRADOS][GATE_MAX],  /* autocorrelation, vertical channel */
           autocorr_h[GRADOS][GATE_MAX];  /* autocorrelation, horizontal channel */
    uint16_t valid_samples[ALL_PULSOS];   /* sample count per pulse */
    FILE *filein;
    FILE *fileout;
    double start = omp_get_wtime();
    double tock;

    if (argc > 1)
        thilo = (int) strtol(argv[1], NULL, 10);
    if (thilo == 0)
        thilo = 1;
    omp_set_num_threads(thilo);

    filein = fopen("./pulsos.iq", "rb");
    if (filein == NULL) {
        perror("# opening file ERROR");
        exit(EXIT_FAILURE);
    }
    /* file size in bytes */
    fseek(filein, 0L, SEEK_END);
    file_size = (int) ftell(filein);
    fseek(filein, 0, SEEK_SET);

    buffer = (char *) calloc(1, (size_t) file_size + 1);
    if (buffer == NULL) {
        perror("# Memory error malloc! \n");
        fclose(filein);
        exit(EXIT_FAILURE);
    }
    /* slurp the whole file; bail out on a short read */
    if (fread(buffer, (size_t) file_size, 1, filein) != 1) {
        perror("# reading file ERROR");
        free(buffer);
        fclose(filein);
        exit(EXIT_FAILURE);
    }
    fclose(filein);

    /* index the pulse records; bounded by ALL_PULSOS so a larger file
       cannot overrun pos_pulso/valid_samples/a_gates */
    for (int i = 0; i < ALL_PULSOS && ptr_buffer < file_size; i++) {
        pos_pulso[i] = ptr_buffer + sizeof(uint16_t);
        memmove(&valid_samples[i], &buffer[ptr_buffer], sizeof(uint16_t));
        /* advance: header + 4 floats per sample (I and Q for both V and H) */
        ptr_buffer += sizeof(uint16_t) + 4 * valid_samples[i] * sizeof(float);
        a_gates[i] = valid_samples[i] / GATE_MAX;   /* tentative gate size */
    }
    tock = omp_get_wtime();
    printf("# Lectura = %lfs\n", tock - start);

    /* build the pulse-by-gate matrices */
    tock = omp_get_wtime();
    process(valid_samples, pos_pulso, a_gates, pulsos_v_gate, pulsos_h_gate, buffer);
    free(buffer);
    printf("# Process = %lfs\n", omp_get_wtime() - tock);

    /* lag-1 autocorrelation per degree/gate */
    tock = omp_get_wtime();
    correlacion(autocorr_v, autocorr_h, pulsos_v_gate, pulsos_h_gate);
    printf("# Correlacion = %lfs\n", omp_get_wtime() - tock);

    tock = omp_get_wtime();
    fileout = fopen("./proccess.outln", "wb");
    if (fileout == NULL) {
        perror("# opening output file ERROR");
        exit(EXIT_FAILURE);
    }
    for (int idx_grado = 0; idx_grado < GRADOS; idx_grado++) {
        grado_aux++;
        fwrite(&grado_aux, sizeof(int), 1, fileout);                      /* degree tag */
        fwrite(autocorr_h[idx_grado], sizeof(double), GATE_MAX, fileout); /* H gates */
        fwrite(autocorr_v[idx_grado], sizeof(double), GATE_MAX, fileout); /* V gates */
    }
    fclose(fileout);
    printf("# Escritura = %lfs\n", omp_get_wtime() - tock);
    printf("# Total = %lfs\n", omp_get_wtime() - start);
    return 0;
}

/**
 * Builds the pulse-by-gate matrices: for each pulse, samples are distributed
 * over GATE_MAX gates (remainder spread evenly via an error accumulator) and
 * each gate gets the arithmetic mean of the sample moduli.
 *
 * @param valid_samples  sample count per pulse
 * @param pos_pulso      byte offset of each pulse's sample data in buffer
 * @param a_gates        tentative samples-per-gate per pulse
 * @param pulsos_v_gate  out: per-gate mean modulus, vertical channel
 * @param pulsos_h_gate  out: per-gate mean modulus, horizontal channel
 * @param buffer         raw file contents
 * @return 0
 */
int process(const uint16_t *valid_samples, const int *pos_pulso, const int *a_gates,
            double pulsos_v_gate[ALL_PULSOS][GATE_MAX],
            double pulsos_h_gate[ALL_PULSOS][GATE_MAX],
            const char *buffer)
{
#pragma omp parallel for
    for (int idx_puls = 0; idx_puls < ALL_PULSOS; idx_puls++) {
        /* BUGFIX: muestra_z was a single shared struct written by every
           thread (data race). It is now declared inside the parallel loop,
           making it thread-private. The former reduction(+:cont_v,cont_h)
           clause was also dropped: those are per-gate accumulators, now
           simply loop-local. */
        struct complex muestra_z;
        int resto = valid_samples[idx_puls] % GATE_MAX; /* leftover samples */
        int ptr_buffer = pos_pulso[idx_puls];
        int valids_count = 0;                           /* samples consumed */
        int resto_add = 0;                              /* remainder error accumulator */

        for (int idx_gate = 0; idx_gate < GATE_MAX; idx_gate++) {
            int gate_local;
            resto_add += resto;
            if (resto_add >= GATE_MAX) {
                /* accumulated remainder overflowed: this gate takes one extra sample */
                gate_local = a_gates[idx_puls] + 1;
                resto_add -= GATE_MAX;
            } else {
                gate_local = a_gates[idx_puls];
            }
            double cont_v = 0, cont_h = 0;
            for (int i = 0; i < gate_local && valids_count < valid_samples[idx_puls]; i++) {
                /* V sample modulus */
                memmove(&muestra_z, &buffer[ptr_buffer], sizeof(struct complex));
                cont_v += sqrt((double) muestra_z.phas * muestra_z.phas +
                               (double) muestra_z.quad * muestra_z.quad);
                /* matching H sample sits valid_samples records further on */
                memmove(&muestra_z,
                        &buffer[ptr_buffer + valid_samples[idx_puls] * sizeof(struct complex)],
                        sizeof(struct complex));
                cont_h += sqrt((double) muestra_z.phas * muestra_z.phas +
                               (double) muestra_z.quad * muestra_z.quad);
                ptr_buffer += sizeof(struct complex);
                valids_count++;
            }
            /* gate value = arithmetic mean of the moduli */
            pulsos_v_gate[idx_puls][idx_gate] = cont_v / gate_local;
            pulsos_h_gate[idx_puls][idx_gate] = cont_h / gate_local;
        }
    }
    return 0;
}

/**
 * Lag-1 autocorrelation for each degree/gate: sums products of consecutive
 * pulses within each degree's block of PULSOS pulses, normalized by PULSOS.
 *
 * @param autocorr_v     out: autocorrelation, vertical channel
 * @param autocorr_h     out: autocorrelation, horizontal channel
 * @param pulsos_v_gate  per-gate means, vertical channel
 * @param pulsos_h_gate  per-gate means, horizontal channel
 * @return 0
 */
int correlacion(double autocorr_v[GRADOS][GATE_MAX], double autocorr_h[GRADOS][GATE_MAX],
                double pulsos_v_gate[ALL_PULSOS][GATE_MAX],
                double pulsos_h_gate[ALL_PULSOS][GATE_MAX])
{
    /* Former reduction(+:sumador_v,sumador_h) removed: the sums are reset
       per gate, so they are plain loop-local accumulators, not a reduction
       across the parallel loop. */
#pragma omp parallel for
    for (int idx_grado = 0; idx_grado < GRADOS; idx_grado++) {
        for (int idx_gate = 0; idx_gate < GATE_MAX; idx_gate++) {
            double sumador_v = 0, sumador_h = 0;
            for (int idx_pulso = 0; idx_pulso < (PULSOS - 1); idx_pulso++) {
                sumador_v += pulsos_v_gate[(PULSOS * idx_grado) + idx_pulso][idx_gate] *
                             pulsos_v_gate[(PULSOS * idx_grado) + idx_pulso + 1][idx_gate];
                sumador_h += pulsos_h_gate[(PULSOS * idx_grado) + idx_pulso][idx_gate] *
                             pulsos_h_gate[(PULSOS * idx_grado) + idx_pulso + 1][idx_gate];
            }
            autocorr_v[idx_grado][idx_gate] = sumador_v / PULSOS;
            autocorr_h[idx_grado][idx_gate] = sumador_h / PULSOS;
        }
    }
    return 0;
}
omp_hl.c
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <omp.h> #include "papi.h" #include "papi_test.h" #include "do_loops.h" int main( int argc, char **argv ) { int retval, i; int quiet = 0; char* region_name; /* Set TESTS_QUIET variable */ quiet = tests_quiet( argc, argv ); region_name = "do_flops"; #pragma omp parallel #pragma omp for for ( i = 1; i <= 4; ++i ) { int tid; tid = omp_get_thread_num(); if ( !quiet ) { printf("\nThread %d: instrument flops\n", tid); } retval = PAPI_hl_region_begin(region_name); if ( retval != PAPI_OK ) { test_fail( __FILE__, __LINE__, "PAPI_hl_region_begin", retval ); } do_flops( NUM_FLOPS ); retval = PAPI_hl_region_end(region_name); if ( retval != PAPI_OK ) { test_fail( __FILE__, __LINE__, "PAPI_hl_region_end", retval ); } } region_name = "do_flops_2"; #pragma omp parallel #pragma omp for for ( i = 1; i <= 4; ++i ) { int tid; tid = omp_get_thread_num(); if ( !quiet ) { printf("\nThread %d: instrument flops_2\n", tid); } retval = PAPI_hl_region_begin(region_name); if ( retval != PAPI_OK ) { test_fail( __FILE__, __LINE__, "PAPI_hl_region_begin", retval ); } do_flops( NUM_FLOPS ); retval = PAPI_hl_region_end(region_name); if ( retval != PAPI_OK ) { test_fail( __FILE__, __LINE__, "PAPI_hl_region_end", retval ); } } test_hl_pass( __FILE__ ); return 0; }
bricksetup.h
/**
 * @file
 * @brief Brick iterator and setup code
 */

#ifndef BRICK_SETUP_H
#define BRICK_SETUP_H

#include <vector>
#include <typeinfo>
#include <initializer_list>
#include <algorithm>
#include "brick.h"

// Tag types for compile-time recursion over dimensions: RunningTag while
// dimensions remain, StopTag terminates the recursion at dimension 0.
struct RunningTag {
};
struct StopTag {
};

// TagSelect<d>::value is a RunningTag for d > 0 and a StopTag for d == 0,
// letting overload resolution pick the recursive vs. base-case overload.
template<unsigned select>
struct TagSelect {
  static constexpr RunningTag value = RunningTag();
};

template<>
struct TagSelect<0> {
  static constexpr StopTag value = StopTag();
};

// Recursive case: for the current dimension, visit the -1/0/+1 neighbor
// offsets, filling the 3^dims adjacency list in base-3 digit order
// (str = 3^(d-1) is the stride of this dimension's digit in adjlist).
template<unsigned dims, unsigned d>
inline void init_fill(const std::vector<long> &stride, unsigned *adjlist, unsigned *grid_ptr, unsigned *low,
                      unsigned *high, RunningTag t) {
  unsigned str = static_power<3, d - 1>::value;
  init_fill<dims, d - 1>(stride, adjlist, grid_ptr - stride[dims - d], low, high, TagSelect<d - 1>::value);
  init_fill<dims, d - 1>(stride, adjlist + str, grid_ptr, low, high, TagSelect<d - 1>::value);
  init_fill<dims, d - 1>(stride, adjlist + str * 2, grid_ptr + stride[dims - d], low, high, TagSelect<d - 1>::value);
}

// Base case: record the neighbor's brick index, or 0 if the computed
// pointer falls outside the [low, high) grid storage (out-of-domain).
template<unsigned dims, unsigned d>
inline void init_fill(const std::vector<long> &stride, unsigned *adjlist, unsigned *grid_ptr, unsigned *low,
                      unsigned *high, StopTag t) {
  if (grid_ptr >= low && grid_ptr < high)
    *adjlist = *grid_ptr;
  else
    *adjlist = 0;
}

// Recursive case: iterate over every grid position, one dimension per
// recursion level. Only the outermost dimension (dims == d) is
// parallelized, so nested loops stay serial within a thread.
template<unsigned dims, unsigned d>
inline void init_iter(const std::vector<long> &dimlist, const std::vector<long> &stride, BrickInfo<dims> &bInfo,
                      unsigned *grid_ptr, unsigned *low, unsigned *high, RunningTag t) {
  if (dims == d) {
#pragma omp parallel for
    for (long s = 0; s < dimlist[dims - d]; ++s)
      init_iter<dims, d - 1>(dimlist, stride, bInfo, grid_ptr + s * stride[dims - d], low, high,
                             TagSelect<d - 1>::value);
  } else {
    for (long s = 0; s < dimlist[dims - d]; ++s)
      init_iter<dims, d - 1>(dimlist, stride, bInfo, grid_ptr + s * stride[dims - d], low, high,
                             TagSelect<d - 1>::value);
  }
}

// Base case: at a single grid cell, populate its adjacency row in bInfo.
template<unsigned dims, unsigned d>
inline void init_iter(const std::vector<long> &dimlist, const std::vector<long> &stride, BrickInfo<dims> &bInfo,
                      unsigned *grid_ptr, unsigned *low, unsigned *high, StopTag t) {
  init_fill<dims, dims>(stride, bInfo.adj[*grid_ptr], grid_ptr, low, high, RunningTag());
}

// Allocate and number the brick grid (grid_ptr[pos] = pos), then build the
// adjacency info for every brick. NOTE(review): grid_ptr is malloc'd and
// ownership passes to the caller — presumably freed elsewhere; confirm.
template<unsigned dims>
BrickInfo<dims> init_grid(unsigned *&grid_ptr, const std::vector<long> &dimlist) {
  long size = 1;
  for (const auto a: dimlist)
    size *= a;
  grid_ptr = (unsigned *) malloc(size * sizeof(unsigned));
  for (unsigned pos = 0; pos < size; ++pos)
    grid_ptr[pos] = pos;
  BrickInfo<dims> bInfo(size);
  long tsize = size;
  // strides are row-major with the FIRST dimlist entry outermost
  std::vector<long> stride;
  for (const auto a: dimlist) {
    size = size / a;
    stride.push_back(size);
  }
  init_iter<dims, dims>(dimlist, stride, bInfo, grid_ptr, grid_ptr, grid_ptr + tsize, RunningTag());
  return bInfo;
}

// Recursive case: walk one tile dimension, descending into both the flat
// array (via stride) and the brick accessor proxy `a` in lock-step.
template<unsigned dims, unsigned d, typename F, typename A>
inline void fill(const std::vector<long> &tile, const std::vector<long> &stride, bElem *arr, A a, F f, RunningTag t) {
  for (long s = 0; s < tile[d - 1]; ++s)
    fill<dims, d - 1>(tile, stride, arr + s * stride[d - 1], a[s], f, TagSelect<d - 1>::value);
}

// Base case: apply the user functor to one (brick element, array element) pair.
template<unsigned dims, unsigned d, typename F, typename A>
inline void fill(const std::vector<long> &tile, const std::vector<long> &stride, bElem *arr, A &a, F f, StopTag t) {
  f(a, arr);
}

// Recursive case: iterate tile-by-tile over the interior region, skipping
// `ghost` tiles and `padding` array cells; parallel on the outermost dim.
template<unsigned dims, unsigned d, typename T, typename F>
inline void
iter(const std::vector<long> &dimlist, const std::vector<long> &tile, const std::vector<long> &strideA,
     const std::vector<long> &strideB, const std::vector<long> &padding, const std::vector<long> &ghost,
     T &brick, bElem *arr, unsigned *grid_ptr, F f, RunningTag t) {
  constexpr unsigned dimp = d - 1;
  if (dims == d) {
#pragma omp parallel for
    for (long s = ghost[dimp] / tile[dimp]; s < (dimlist[dimp] + ghost[dimp]) / tile[dimp]; ++s)
      iter<dims, d - 1>(dimlist, tile, strideA, strideB, padding, ghost, brick,
                        arr + (padding[dimp] + s * tile[dimp]) * strideA[dimp],
                        grid_ptr + s * strideB[dimp], f, TagSelect<dimp>::value);
  } else {
    for (long s = ghost[dimp] / tile[dimp]; s < (dimlist[dimp] + ghost[dimp]) / tile[dimp]; ++s)
      iter<dims, d - 1>(dimlist, tile, strideA, strideB, padding, ghost, brick,
                        arr + (padding[dimp] + s * tile[dimp]) * strideA[dimp],
                        grid_ptr + s * strideB[dimp], f, TagSelect<dimp>::value);
  }
}

// Base case: one tile selected; fill() pairs up its elements with the brick
// whose index is *grid_ptr.
template<unsigned dims, unsigned d, typename T, typename F>
inline void
iter(const std::vector<long> &dimlist, const std::vector<long> &tile, const std::vector<long> &strideA,
     const std::vector<long> &strideB, const std::vector<long> &padding, const std::vector<long> &ghost,
     T &brick, bElem *arr, unsigned *grid_ptr, F f, StopTag t) {
  fill<dims, dims>(tile, strideA, arr, brick[*grid_ptr], f, RunningTag());
}

/*
 * Iterate elements side by side in brick and arrays.
 *
 * dimlist: the internal regions, iterated
 * padding: the padding necessary for arrays, skipped
 * ghost: the padding for both, skipped
 * f: F (&bElem, *bElem) -> void
 */
template<unsigned dims, typename F, typename T, unsigned ... BDims>
inline void iter_grid(const std::vector<long> &dimlist, const std::vector<long> &padding,
                      const std::vector<long> &ghost, bElem *arr, unsigned *grid_ptr,
                      Brick<Dim<BDims...>, T> &brick, F f) {
  std::vector<long> strideA;
  std::vector<long> strideB;
  std::vector<long> tile = {BDims...};
  // Arrays are contiguous first
  std::reverse(tile.begin(), tile.end());
  long sizeA = 1;
  long sizeB = 1;
  // strideA walks the padded flat array, strideB walks the tile grid
  for (long a = 0; a < dimlist.size(); ++a) {
    strideA.push_back(sizeA);
    strideB.push_back(sizeB);
    sizeA *= (dimlist[a] + 2 * (padding[a] + ghost[a]));
    sizeB *= ((dimlist[a] + 2 * ghost[a]) / tile[a]);
  }
  iter<dims, dims>(dimlist, tile, strideA, strideB, padding, ghost, brick, arr, grid_ptr, f, RunningTag());
}

/**
 * @brief Copy values from an array to bricks
 * @tparam dims number of dimensions
 * @tparam T type for brick
 * @param dimlist dimensions, contiguous first
 * @param padding padding applied to array format (skipped)
 * @param ghost padding applied to array and brick (skipped)
 * @param arr array input
 * @param grid_ptr the grid array contains indices of bricks
 * @param brick the brick data structure
 */
template<unsigned dims, typename T>
inline void
copyToBrick(const std::vector<long> &dimlist, const std::vector<long> &padding, const std::vector<long> &ghost,
            bElem *arr, unsigned *grid_ptr, T &brick) {
  auto f = [](bElem &brick, bElem *arr) -> void {
    brick = *arr;
  };

  iter_grid<dims>(dimlist, padding, ghost, arr, grid_ptr, brick, f);
}

/**
 * @brief Copy values from an array to bricks without ghost or padding
 * @tparam dims
 * @tparam T
 * @param dimlist
 * @param arr
 * @param grid_ptr
 * @param brick
 *
 * For parameters see copyToBrick(const std::vector<long> &dimlist, const std::vector<long> &padding,
 * const std::vector<long> &ghost, bElem *arr, unsigned *grid_ptr, T &brick)
 */
template<unsigned dims, typename T>
inline void copyToBrick(const std::vector<long> &dimlist, bElem *arr, unsigned *grid_ptr, T &brick) {
  // delegate with zero padding and zero ghost in every dimension
  std::vector<long> padding(dimlist.size(), 0);
  std::vector<long> ghost(dimlist.size(), 0);
  copyToBrick<dims>(dimlist, padding, ghost, arr, grid_ptr, brick);
}

/**
 * @brief Copy values from bricks to an array
 * @tparam dims number of dimensions
 * @tparam T type for brick
 * @param dimlist dimensions, contiguous first
 * @param padding padding applied to array format (skipped)
 * @param ghost padding applied to array and brick (skipped)
 * @param arr array input
 * @param grid_ptr the grid array contains indices of bricks
 * @param brick the brick data structure
 */
template<unsigned dims, typename T>
inline void
copyFromBrick(const std::vector<long> &dimlist, const std::vector<long> &padding, const std::vector<long> &ghost,
              bElem *arr, unsigned *grid_ptr, T &brick) {
  auto f = [](bElem &brick, bElem *arr) -> void {
    *arr = brick;
  };

  iter_grid<dims>(dimlist, padding, ghost, arr, grid_ptr, brick, f);
}

#endif
geo_region_growing.h
/* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the copyright holder(s) nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* * Author : Sergey Ushakov * Email : mine_all_mine@bk.ru * */ /* * Modified by Xin Wang * Email : ericrussell@zju.edu.cn */ #pragma once #include <pcl/pcl_base.h> #include <pcl/search/search.h> #include <pcl/point_cloud.h> #include <pcl/point_types.h> #include <pcl/search/kdtree.h> #include <list> #include <math.h> #include <time.h> #include <queue> #include "omp.h" /** \brief * Implements the well known Region Growing algorithm used for segmentation. * Description can be found in the article * "Segmentation of point clouds using smoothness constraint" * by T. Rabbania, F. A. van den Heuvelb, G. Vosselmanc. * In addition to residual test, the possibility to test curvature is added. */ template <typename PointT, typename NormalT> class GeoRegionGrowing : public pcl::PCLBase<PointT> { public: typedef pcl::search::Search <PointT> KdTree; typedef typename KdTree::Ptr KdTreePtr; typedef pcl::PointCloud <NormalT> Normal; typedef typename Normal::Ptr NormalPtr; typedef pcl::PointCloud <PointT> PointCloud; using pcl::PCLBase <PointT>::input_; using pcl::PCLBase <PointT>::indices_; using pcl::PCLBase <PointT>::initCompute; using pcl::PCLBase <PointT>::deinitCompute; public: /** \brief Constructor that sets default values for member variables. */ GeoRegionGrowing (); /** \brief This destructor destroys the cloud, normals and search method used for * finding KNN. In other words it frees memory. */ virtual ~GeoRegionGrowing (); /** \brief Get the minimum number of points that a cluster needs to contain in order to be considered valid. */ int getMinClusterSize (); /** \brief Set the minimum number of points that a cluster needs to contain in order to be considered valid. */ void setMinClusterSize (int min_cluster_size); /** \brief Get the maximum number of points that a cluster needs to contain in order to be considered valid. */ int getMaxClusterSize (); /** \brief Set the maximum number of points that a cluster needs to contain in order to be considered valid. 
*/ void setMaxClusterSize (int max_cluster_size); /** \brief Returns the flag value. This flag signalizes which mode of algorithm will be used. * If it is set to true than it will work as said in the article. This means that * it will be testing the angle between normal of the current point and it's neighbours normal. * Otherwise, it will be testing the angle between normal of the current point * and normal of the initial point that was chosen for growing new segment. */ bool getSmoothModeFlag () const; /** \brief This function allows to turn on/off the smoothness constraint. * \param[in] value new mode value, if set to true then the smooth version will be used. */ void setSmoothModeFlag (bool value); /** \brief Returns the flag that signalize if the curvature test is turned on/off. */ bool getCurvatureTestFlag () const; /** \brief Allows to turn on/off the curvature test. Note that at least one test * (residual or curvature) must be turned on. If you are turning curvature test off * then residual test will be turned on automatically. * \param[in] value new value for curvature test. If set to true then the test will be turned on */ virtual void setCurvatureTestFlag (bool value); /** \brief Returns the flag that signalize if the residual test is turned on/off. */ bool getResidualTestFlag () const; /** \brief * Allows to turn on/off the residual test. Note that at least one test * (residual or curvature) must be turned on. If you are turning residual test off * then curvature test will be turned on automatically. * \param[in] value new value for residual test. If set to true then the test will be turned on */ virtual void setResidualTestFlag (bool value); /** \brief Returns smoothness threshold. */ float getSmoothnessThreshold () const; /** \brief Allows to set smoothness threshold used for testing the points. * \param[in] theta new threshold value for the angle between normals */ void setSmoothnessThreshold (float theta); /** \brief Returns residual threshold. 
*/ float getResidualThreshold () const; /** \brief Allows to set residual threshold used for testing the points. * \param[in] residual new threshold value for residual testing */ void setResidualThreshold (float residual); /** \brief Returns curvature threshold. */ float getCurvatureThreshold () const; /** \brief Allows to set curvature threshold used for testing the points. * \param[in] curvature new threshold value for curvature testing */ void setCurvatureThreshold (float curvature); /** \brief Returns the number of nearest neighbours used for KNN. */ unsigned int getNumberOfNeighbours () const; /** \brief Allows to set the number of neighbours. For more information check the article. * \param[in] neighbour_number number of neighbours to use */ void setNumberOfNeighbours (unsigned int neighbour_number); /** \brief Returns the pointer to the search method that is used for KNN. */ KdTreePtr getSearchMethod () const; /** \brief Allows to set search method that will be used for finding KNN. * \param[in] search search method to use */ void setSearchMethod (const KdTreePtr& tree); /** \brief Returns normals. */ NormalPtr getInputNormals () const; /** \brief This method sets the normals. They are needed for the algorithm, so if * no normals will be set, the algorithm would not be able to segment the points. * \param[in] norm normals that will be used in the algorithm */ void setInputNormals (const NormalPtr& norm); /** \brief This method launches the segmentation algorithm and returns the clusters that were * obtained during the segmentation. * \param[out] clusters clusters that were obtained. Each cluster is an array of point indices. */ virtual void extract (std::vector <pcl::PointIndices>& clusters); /** \brief For a given point this function builds a segment to which it belongs and returns this segment. * \param[in] index index of the initial point which will be the seed for growing a segment. * \param[out] cluster cluster to which the point belongs. 
*/ virtual void getSegmentFromPoint (int index, pcl::PointIndices& cluster); /** \brief If the cloud was successfully segmented, then function * returns colored cloud. Otherwise it returns an empty pointer. * Points that belong to the same segment have the same color. * But this function doesn't guarantee that different segments will have different * color(it all depends on RNG). Points that were not listed in the indices array will have red color. */ pcl::PointCloud<pcl::PointXYZRGB>::Ptr getColoredCloud (); /** \brief If the cloud was successfully segmented, then function * returns colored cloud. Otherwise it returns an empty pointer. * Points that belong to the same segment have the same color. * But this function doesn't guarantee that different segments will have different * color(it all depends on RNG). Points that were not listed in the indices array will have red color. */ pcl::PointCloud<pcl::PointXYZRGBA>::Ptr getColoredCloudRGBA (); protected: /** \brief This method simply checks if it is possible to execute the segmentation algorithm with * the current settings. If it is possible then it returns true. */ virtual bool prepareForSegmentation (); /** \brief This method finds KNN for each point and saves them to the array * because the algorithm needs to find KNN a few times. */ virtual void findPointNeighbours (); /** \brief This function implements the algorithm described in the article * "Segmentation of point clouds using smoothness constraint" * by T. Rabbania, F. A. van den Heuvelb, G. Vosselmanc. */ void applySmoothRegionGrowingAlgorithm (); /** \brief This method grows a segment for the given seed point. And returns the number of its points. * \param[in] initial_seed index of the point that will serve as the seed point * \param[in] segment_number indicates which number this segment will have */ int growRegion (int initial_seed, int segment_number); /** \brief This function is checking if the point with index 'nghbr' belongs to the segment. 
* If so, then it returns true. It also checks if this point can serve as the seed. * \param[in] initial_seed index of the initial point that was passed to the growRegion() function * \param[in] point index of the current seed point * \param[in] nghbr index of the point that is neighbour of the current seed * \param[out] is_a_seed this value is set to true if the point with index 'nghbr' can serve as the seed */ virtual bool validatePoint (int initial_seed, int point, int nghbr, bool& is_a_seed) const; /** \brief This function simply assembles the regions from list of point labels. * \param[out] clusters clusters that were obtained during the segmentation process. * Each cluster is an array of point indices. */ void assembleRegions (); protected: /** \brief Stores the minimum number of points that a cluster needs to contain in order to be considered valid. */ int min_pts_per_cluster_; /** \brief Stores the maximum number of points that a cluster needs to contain in order to be considered valid. */ int max_pts_per_cluster_; /** \brief Flag that signalizes if the smoothness constraint will be used. */ bool smooth_mode_flag_; /** \brief If set to true then curvature test will be done during segmentation. */ bool curvature_flag_; /** \brief If set to true then residual test will be done during segmentation. */ bool residual_flag_; /** \brief Thershold used for testing the smoothness between points. */ float theta_threshold_; /** \brief Thershold used in residual test. */ float residual_threshold_; /** \brief Thershold used in curvature test. */ float curvature_threshold_; /** \brief Number of neighbours to find. */ unsigned int neighbour_number_; /** \brief Serch method that will be used for KNN. */ KdTreePtr search_; /** \brief Contains normals of the points that will be segmented. */ NormalPtr normals_; /** \brief Contains neighbours of each point. 
*/ std::vector<std::vector<int> > point_neighbours_;

/** \brief Point labels that tells to which segment each point belongs. */
std::vector<int> point_labels_;

/** \brief If set to true then normal/smoothness test will be done during segmentation.
  * It is always set to true for the usual region growing algorithm. It is used for turning on/off the test
  * for smoothness in the child class RegionGrowingRGB.*/
bool normal_flag_;

/** \brief Tells how much points each segment contains. Used for reserving memory. */
std::vector<int> num_pts_in_segment_;

/** \brief After the segmentation it will contain the segments. */
std::vector <pcl::PointIndices> clusters_;

/** \brief Stores the number of segments. */
int number_of_segments_;

public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};

/** \brief This function is used as a comparator for sorting.
  * Orders (curvature, point-index) pairs by ascending first element so that
  * low-curvature points are picked as seeds first. */
inline bool
comparePair (std::pair<float, int> i, std::pair<float, int> j)
{
  return (i.first < j.first);
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Default constructor. Initializes every threshold/flag to its default:
  * curvature and residual tests on, smoothness test off (smooth_mode_flag_ = false),
  * 30-degree angle threshold, 0.05 residual/curvature thresholds, 30 neighbours. */
template <typename PointT, typename NormalT>
GeoRegionGrowing<PointT, NormalT>::GeoRegionGrowing () :
  min_pts_per_cluster_ (1),
  max_pts_per_cluster_ (std::numeric_limits<int>::max ()),
  smooth_mode_flag_ (false),
  curvature_flag_ (true),
  residual_flag_ (true),
  theta_threshold_ (30.0f / 180.0f * static_cast<float> (M_PI)),  // 30 degrees in radians
  residual_threshold_ (0.05f),
  curvature_threshold_ (0.05f),
  neighbour_number_ (30),
  search_ (),
  normals_ (),
  point_neighbours_ (0),
  point_labels_ (0),
  normal_flag_ (true),
  num_pts_in_segment_ (0),
  clusters_ (0),
  number_of_segments_ (0)
{
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Destructor. Releases the search tree and normals and clears all per-run state. */
template <typename PointT, typename NormalT>
GeoRegionGrowing<PointT, NormalT>::~GeoRegionGrowing ()
{
  if (search_ != 0)
    search_.reset ();
  if (normals_ != 0)
    normals_.reset ();

  point_neighbours_.clear ();
  point_labels_.clear ();
  num_pts_in_segment_.clear ();
  clusters_.clear ();
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Returns the minimum number of points a cluster must contain to be kept. */
template <typename PointT, typename NormalT> int
GeoRegionGrowing<PointT, NormalT>::getMinClusterSize ()
{
  return (min_pts_per_cluster_);
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Sets the minimum number of points a cluster must contain to be kept. */
template <typename PointT, typename NormalT> void
GeoRegionGrowing<PointT, NormalT>::setMinClusterSize (int min_cluster_size)
{
  min_pts_per_cluster_ = min_cluster_size;
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Returns the maximum number of points a cluster may contain to be kept. */
template <typename PointT, typename NormalT> int
GeoRegionGrowing<PointT, NormalT>::getMaxClusterSize ()
{
  return (max_pts_per_cluster_);
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Sets the maximum number of points a cluster may contain to be kept. */
template <typename PointT, typename NormalT> void
GeoRegionGrowing<PointT, NormalT>::setMaxClusterSize (int max_cluster_size)
{
  max_pts_per_cluster_ = max_cluster_size;
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Returns whether the smoothness constraint variant is used (see validatePoint). */
template <typename PointT, typename NormalT> bool
GeoRegionGrowing<PointT, NormalT>::getSmoothModeFlag () const
{
  return (smooth_mode_flag_);
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Enables/disables the smoothness-constraint variant of the growing test. */
template <typename PointT, typename NormalT> void
GeoRegionGrowing<PointT, NormalT>::setSmoothModeFlag (bool value)
{
  smooth_mode_flag_ = value;
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Returns whether the curvature test is applied during growing. */
template <typename PointT, typename NormalT> bool
GeoRegionGrowing<PointT, NormalT>::getCurvatureTestFlag () const
{
  return (curvature_flag_);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Enables/disables the curvature test.
  * At least one of the curvature/residual tests must stay active, so disabling
  * both re-enables the residual test. */
template <typename PointT, typename NormalT> void
GeoRegionGrowing<PointT, NormalT>::setCurvatureTestFlag (bool value)
{
  curvature_flag_ = value;

  // invariant: curvature_flag_ || residual_flag_ is always true
  if (curvature_flag_ == false && residual_flag_ == false)
    residual_flag_ = true;
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Returns whether the residual test is applied during growing. */
template <typename PointT, typename NormalT> bool
GeoRegionGrowing<PointT, NormalT>::getResidualTestFlag () const
{
  return (residual_flag_);
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Enables/disables the residual test.
  * At least one of the curvature/residual tests must stay active, so disabling
  * both re-enables the curvature test. */
template <typename PointT, typename NormalT> void
GeoRegionGrowing<PointT, NormalT>::setResidualTestFlag (bool value)
{
  residual_flag_ = value;

  // invariant: curvature_flag_ || residual_flag_ is always true
  if (curvature_flag_ == false && residual_flag_ == false)
    curvature_flag_ = true;
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Returns the smoothness (normal angle) threshold, in radians. */
template <typename PointT, typename NormalT> float
GeoRegionGrowing<PointT, NormalT>::getSmoothnessThreshold () const
{
  return (theta_threshold_);
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Sets the smoothness (normal angle) threshold.
  * \param[in] theta angle in radians (its cosine is compared in validatePoint) */
template <typename PointT, typename NormalT> void
GeoRegionGrowing<PointT, NormalT>::setSmoothnessThreshold (float theta)
{
  theta_threshold_ = theta;
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Returns the residual threshold used by the residual test. */
template <typename PointT, typename NormalT> float
GeoRegionGrowing<PointT, NormalT>::getResidualThreshold () const
{
  return (residual_threshold_);
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Sets the residual threshold. Must be > 0 for the residual test
  * (checked in prepareForSegmentation). */
template <typename PointT, typename NormalT> void
GeoRegionGrowing<PointT, NormalT>::setResidualThreshold (float residual)
{
  residual_threshold_ = residual;
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Returns the curvature threshold used by the curvature test. */
template <typename PointT, typename NormalT> float
GeoRegionGrowing<PointT, NormalT>::getCurvatureThreshold () const
{
  return (curvature_threshold_);
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Sets the curvature threshold used by the curvature test. */
template <typename PointT, typename NormalT> void
GeoRegionGrowing<PointT, NormalT>::setCurvatureThreshold (float curvature)
{
  curvature_threshold_ = curvature;
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Returns the number of nearest neighbours requested per point. */
template <typename PointT, typename NormalT> unsigned int
GeoRegionGrowing<PointT, NormalT>::getNumberOfNeighbours () const
{
  return (neighbour_number_);
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Sets the number of nearest neighbours requested per point. Must be > 0. */
template <typename PointT, typename NormalT> void
GeoRegionGrowing<PointT, NormalT>::setNumberOfNeighbours (unsigned int neighbour_number)
{
  neighbour_number_ = neighbour_number;
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Returns the KNN search method currently set (may be null). */
template <typename PointT, typename NormalT>
typename GeoRegionGrowing<PointT, NormalT>::KdTreePtr
GeoRegionGrowing<PointT, NormalT>::getSearchMethod () const
{
  return (search_);
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Replaces the KNN search method. A KdTree is created lazily in
  * prepareForSegmentation if none is supplied. */
template <typename PointT, typename NormalT> void
GeoRegionGrowing<PointT, NormalT>::setSearchMethod (const KdTreePtr& tree)
{
  if (search_ != 0)
    search_.reset ();

  search_ = tree;
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Returns the normals cloud currently set (may be null). */
template <typename PointT, typename NormalT>
typename GeoRegionGrowing<PointT, NormalT>::NormalPtr
GeoRegionGrowing<PointT, NormalT>::getInputNormals () const
{
  return (normals_);
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Replaces the normals cloud. Its size must match the input cloud
  * (checked in prepareForSegmentation). */
template <typename PointT, typename NormalT> void
GeoRegionGrowing<PointT, NormalT>::setInputNormals (const NormalPtr& norm)
{
  if (normals_ != 0)
    normals_.reset ();

  normals_ = norm;
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Runs the full segmentation and returns the clusters that satisfy the
  * min/max cluster-size limits.
  * \param[out] clusters surviving clusters; clusters_ is updated to the same set */
template <typename PointT, typename NormalT> void
GeoRegionGrowing<PointT, NormalT>::extract (std::vector <pcl::PointIndices>& clusters)
{
  // reset per-run state
  clusters_.clear ();
  clusters.clear ();
  point_neighbours_.clear ();
  point_labels_.clear ();
  num_pts_in_segment_.clear ();
  number_of_segments_ = 0;

  bool segmentation_is_possible = initCompute ();
  if ( !segmentation_is_possible )
  {
    deinitCompute ();
    return;
  }

  segmentation_is_possible = prepareForSegmentation ();
  if ( !segmentation_is_possible )
  {
    deinitCompute ();
    return;
  }

  findPointNeighbours ();
  applySmoothRegionGrowingAlgorithm ();
  assembleRegions ();

  // keep only clusters within [min_pts_per_cluster_, max_pts_per_cluster_],
  // compacting them to the front of 'clusters'
  // NOTE(review): size() is unsigned vs int thresholds — signed/unsigned comparison; confirm limits are non-negative.
  clusters.resize (clusters_.size ());
  std::vector<pcl::PointIndices>::iterator cluster_iter_input = clusters.begin ();
  for (std::vector<pcl::PointIndices>::const_iterator cluster_iter = clusters_.begin (); cluster_iter != clusters_.end (); cluster_iter++)
  {
    if ((cluster_iter->indices.size () >= min_pts_per_cluster_) &&
        (cluster_iter->indices.size () <= max_pts_per_cluster_))
    {
      *cluster_iter_input = *cluster_iter;
      cluster_iter_input++;
    }
  }

  clusters_ = std::vector<pcl::PointIndices> (clusters.begin (), cluster_iter_input);
  clusters.resize(clusters_.size());

  deinitCompute ();
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Validates inputs and parameters before segmentation.
  * \return false if the cloud is empty, normals are missing/mismatched, the
  * residual test is on with a non-positive threshold, or neighbour_number_ is 0 */
template <typename PointT, typename NormalT> bool
GeoRegionGrowing<PointT, NormalT>::prepareForSegmentation ()
{
  // if user forgot to pass point cloud or if it is empty
  if ( input_->points.size () == 0 )
    return (false);

  // if user forgot to pass normals or the sizes of point and normal cloud are different
  if ( normals_ == 0 || input_->points.size () != normals_->points.size () )
    return (false);

  // if residual test is on then we need to check if all needed parameters were correctly initialized
  if (residual_flag_)
  {
    if (residual_threshold_ <= 0.0f)
      return (false);
  }

  // if curvature test is on ...
  // if (curvature_flag_)
  // {
  //   in this case we do not need to check anything that related to it
  //   so we simply commented it
  // }

  // from here we check those parameters that are always valuable
  if (neighbour_number_ == 0)
    return (false);

  // if user didn't set search method
  if (!search_)
    search_.reset (new pcl::search::KdTree<PointT>);

  if (indices_)
  {
    if (indices_->empty ())
      PCL_ERROR ("[pcl::RegionGrowing::prepareForSegmentation] Empty given indices!\n");
    search_->setInputCloud (input_, indices_);
  }
  else
    search_->setInputCloud (input_);

  return (true);
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Finds the K nearest neighbours of every indexed point and caches them
  * in point_neighbours_ (indexed by point index in the full cloud). */
template <typename PointT, typename NormalT> void
GeoRegionGrowing<PointT, NormalT>::findPointNeighbours ()
{
  int point_number = static_cast<int> (indices_->size ());
  std::vector<int> temp_neighbours;
  point_neighbours_.resize (input_->points.size (), temp_neighbours);

  // Each iteration writes a distinct point_neighbours_ slot, so the loop is parallel-safe.
#pragma omp parallel for
  for (int i_point = 0; i_point < point_number; i_point++)
  {
    int point_index = (*indices_)[i_point];
    std::vector<int> neighbours;
    std::vector<float> distances;
    // NOTE(review): i_point (position within indices_) is passed to nearestKSearch, while
    // results are stored under point_index — this mirrors upstream PCL, where the search
    // tree was given indices_ and interprets the query index accordingly; confirm.
    search_->nearestKSearch (i_point, neighbour_number_, neighbours, distances);
    point_neighbours_[point_index].swap (neighbours);
  }
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Main loop of the algorithm from "Segmentation of point clouds using
  * smoothness constraint" (Rabbani et al.): repeatedly picks the unlabelled
  * point with the smallest curvature as a seed and grows a region from it,
  * until every indexed point is labelled. */
template <typename PointT, typename NormalT> void
GeoRegionGrowing<PointT, NormalT>::applySmoothRegionGrowingAlgorithm ()
{
  int num_of_pts = static_cast<int> (indices_->size ());
  point_labels_.resize (input_->points.size (), -1);  // -1 = not yet segmented

  // (curvature, point index) pairs; sorted ascending so flattest points seed first
  std::vector< std::pair<float, int> > point_residual;
  std::pair<float, int> pair;
  point_residual.resize (num_of_pts, pair);

  if (normal_flag_ == true)
  {
    for (int i_point = 0; i_point < num_of_pts; i_point++)
    {
      int point_index = (*indices_)[i_point];
      point_residual[i_point].first = normals_->points[point_index].curvature;
      point_residual[i_point].second = point_index;
    }
    std::sort (point_residual.begin (), point_residual.end (), comparePair);
  }
  else
  {
    // no normals-based ordering: keep the original index order
    for (int i_point = 0; i_point < num_of_pts; i_point++)
    {
      int point_index = (*indices_)[i_point];
      point_residual[i_point].first = 0;
      point_residual[i_point].second = point_index;
    }
  }
  int seed_counter = 0;
  int seed = point_residual[seed_counter].second;

  int segmented_pts_num = 0;
  int number_of_segments = 0;
  while (segmented_pts_num < num_of_pts)
  {
    int pts_in_segment;
    pts_in_segment = growRegion (seed, number_of_segments);
    segmented_pts_num += pts_in_segment;
    num_pts_in_segment_.push_back (pts_in_segment);
    number_of_segments++;

    //find next point that is not segmented yet
    // NOTE(review): seed_counter is never advanced here (upstream PCL sets
    // seed_counter = i_seed on success), so this scan restarts from index 1
    // every round — still correct, but O(n^2) worst case; confirm before changing.
    for (int i_seed = seed_counter + 1; i_seed < num_of_pts; i_seed++)
    {
      int index = point_residual[i_seed].second;
      if (point_labels_[index] == -1)
      {
        seed = index;
        break;
      }
    }
  }
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Grows one region from the given seed via BFS over cached neighbours.
  * Labels accepted points with segment_number in point_labels_.
  * \param[in] initial_seed point index that starts the region
  * \param[in] segment_number label assigned to every point of this region
  * \return number of points in the grown region (>= 1, the seed itself) */
template <typename PointT, typename NormalT> int
GeoRegionGrowing<PointT, NormalT>::growRegion (int initial_seed, int segment_number)
{
  std::queue<int> seeds;
  seeds.push (initial_seed);
  point_labels_[initial_seed] = segment_number;

  int num_pts_in_segment = 1;

  while (!seeds.empty ())
  {
    int curr_seed;
    curr_seed = seeds.front ();
    seeds.pop ();

    size_t i_nghbr = 0;
    // visit at most neighbour_number_ cached neighbours of the current seed
    while ( i_nghbr < neighbour_number_ && i_nghbr < point_neighbours_[curr_seed].size () )
    {
      int index = point_neighbours_[curr_seed][i_nghbr];
      if (point_labels_[index] != -1)
      {
        // already labelled by this or an earlier region
        i_nghbr++;
        continue;
      }

      bool is_a_seed = false;
      bool belongs_to_segment = validatePoint (initial_seed, curr_seed, index, is_a_seed);
      if (belongs_to_segment == false)
      {
        i_nghbr++;
        continue;
      }

      point_labels_[index] = segment_number;
      num_pts_in_segment++;

      if (is_a_seed)
      {
        seeds.push (index);
      }

      i_nghbr++;
    }// next neighbour
  }// next seed

  return (num_pts_in_segment);
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Decides whether 'nghbr' joins the current segment, and whether it can
  * itself become a seed.
  * Membership: angle between the normals of 'point' and 'nghbr' must be within
  * theta_threshold_ (compared via cosines). Seed-ness may be revoked by the
  * curvature test and, when smooth_mode_flag_ is false, by an additional
  * normal-angle test against the initial seed's normal.
  * \param[in] initial_seed index of the region's first seed point
  * \param[in] point index of the current seed point
  * \param[in] nghbr index of the candidate neighbour
  * \param[out] is_a_seed true iff 'nghbr' may serve as a future seed */
template <typename PointT, typename NormalT> bool
GeoRegionGrowing<PointT, NormalT>::validatePoint (int initial_seed, int point, int nghbr, bool& is_a_seed) const
{
  is_a_seed = true;

  float cosine_threshold = cosf (theta_threshold_);
  // NOTE(review): unlike upstream PCL, the "residual" test below compares normal
  // angles against cosf(residual_threshold_) rather than a point-to-plane
  // distance; confirm this is the intended geometric criterion.
  float cosine_residual_threshold = cosf(residual_threshold_);
  float data[4];

  data[0] = input_->points[point].data[0];
  data[1] = input_->points[point].data[1];
  data[2] = input_->points[point].data[2];
  data[3] = input_->points[point].data[3];
  // NOTE(review): initial_point is mapped but never used below (upstream PCL
  // uses it in its distance-based residual test).
  Eigen::Map<Eigen::Vector3f> initial_point (static_cast<float*> (data));
  Eigen::Map<Eigen::Vector3f> initial_normal (static_cast<float*> (normals_->points[point].normal));

  //check the angle between normals
  Eigen::Map<Eigen::Vector3f> nghbr_normal (static_cast<float*> (normals_->points[nghbr].normal));
  float dot_product = fabsf (nghbr_normal.dot (initial_normal));
  if (dot_product < cosine_threshold)
  {
    return (false);
  }

  if (smooth_mode_flag_ == true)
  {
    // check the curvature if needed
    if (curvature_flag_ && normals_->points[nghbr].curvature > curvature_threshold_)
    {
      is_a_seed = false;
    }
  }
  else
  {
    // check the curvature if needed
    if (curvature_flag_ && normals_->points[nghbr].curvature > curvature_threshold_)
    {
      is_a_seed = false;
    }
    // check the residual if needed
    // (these declarations intentionally shadow the outer nghbr_normal/dot_product)
    Eigen::Map<Eigen::Vector3f> nghbr_normal (static_cast<float*> (normals_->points[nghbr].normal));
    Eigen::Map<Eigen::Vector3f> initial_seed_normal (static_cast<float*> (normals_->points[initial_seed].normal));
    float dot_product = fabsf (nghbr_normal.dot (initial_seed_normal));
    if (dot_product < cosine_residual_threshold)
    {
      is_a_seed = false;
    }
  }

  return (true);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename PointT, typename NormalT> void GeoRegionGrowing<PointT, NormalT>::assembleRegions () { int number_of_segments = static_cast<int> (num_pts_in_segment_.size ()); int number_of_points = static_cast<int> (input_->points.size ()); pcl::PointIndices segment; clusters_.resize (number_of_segments, segment); for (int i_seg = 0; i_seg < number_of_segments; i_seg++) { clusters_[i_seg].indices.resize ( num_pts_in_segment_[i_seg], 0); } std::vector<int> counter; counter.resize (number_of_segments, 0); for (int i_point = 0; i_point < number_of_points; i_point++) { int segment_index = point_labels_[i_point]; if (segment_index != -1) { int point_index = counter[segment_index]; clusters_[segment_index].indices[point_index] = i_point; counter[segment_index] = point_index + 1; } } number_of_segments_ = number_of_segments; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename PointT, typename NormalT> void GeoRegionGrowing<PointT, NormalT>::getSegmentFromPoint (int index, pcl::PointIndices& cluster) { cluster.indices.clear (); bool segmentation_is_possible = initCompute (); if ( !segmentation_is_possible ) { deinitCompute (); return; } // first of all we need to find out if this point belongs to cloud bool point_was_found = false; int number_of_points = static_cast <int> (indices_->size ()); for (size_t point = 0; point < number_of_points; point++) if ( (*indices_)[point] == index) { point_was_found = true; break; } if (point_was_found) { if (clusters_.empty ()) { point_neighbours_.clear (); point_labels_.clear (); num_pts_in_segment_.clear (); number_of_segments_ = 0; segmentation_is_possible = prepareForSegmentation (); if ( !segmentation_is_possible ) { deinitCompute (); return; } findPointNeighbours (); applySmoothRegionGrowingAlgorithm (); assembleRegions (); 
} // if we have already made the segmentation, then find the segment // to which this point belongs std::vector <pcl::PointIndices>::iterator i_segment; for (i_segment = clusters_.begin (); i_segment != clusters_.end (); i_segment++) { bool segment_was_found = false; for (size_t i_point = 0; i_point < i_segment->indices.size (); i_point++) { if (i_segment->indices[i_point] == index) { segment_was_found = true; cluster.indices.clear (); cluster.indices.reserve (i_segment->indices.size ()); std::copy (i_segment->indices.begin (), i_segment->indices.end (), std::back_inserter (cluster.indices)); break; } } if (segment_was_found) { break; } }// next segment }// end if point was found deinitCompute (); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename PointT, typename NormalT> pcl::PointCloud<pcl::PointXYZRGB>::Ptr GeoRegionGrowing<PointT, NormalT>::getColoredCloud () { pcl::PointCloud<pcl::PointXYZRGB>::Ptr colored_cloud; if (!clusters_.empty ()) { colored_cloud = (new pcl::PointCloud<pcl::PointXYZRGB>)->makeShared (); srand (static_cast<unsigned int> (time (0))); std::vector<unsigned char> colors; for (size_t i_segment = 0; i_segment < clusters_.size (); i_segment++) { colors.push_back (static_cast<unsigned char> (rand () % 256)); colors.push_back (static_cast<unsigned char> (rand () % 256)); colors.push_back (static_cast<unsigned char> (rand () % 256)); } colored_cloud->width = input_->width; colored_cloud->height = input_->height; colored_cloud->is_dense = input_->is_dense; for (size_t i_point = 0; i_point < input_->points.size (); i_point++) { pcl::PointXYZRGB point; point.x = *(input_->points[i_point].data); point.y = *(input_->points[i_point].data + 1); point.z = *(input_->points[i_point].data + 2); point.r = 255; point.g = 0; point.b = 0; colored_cloud->points.push_back (point); } std::vector< pcl::PointIndices >::iterator i_segment; int next_color = 0; for (i_segment = 
clusters_.begin (); i_segment != clusters_.end (); i_segment++) { std::vector<int>::iterator i_point; for (i_point = i_segment->indices.begin (); i_point != i_segment->indices.end (); i_point++) { int index; index = *i_point; colored_cloud->points[index].r = colors[3 * next_color]; colored_cloud->points[index].g = colors[3 * next_color + 1]; colored_cloud->points[index].b = colors[3 * next_color + 2]; } next_color++; } } return (colored_cloud); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename PointT, typename NormalT> pcl::PointCloud<pcl::PointXYZRGBA>::Ptr GeoRegionGrowing<PointT, NormalT>::getColoredCloudRGBA () { pcl::PointCloud<pcl::PointXYZRGBA>::Ptr colored_cloud; if (!clusters_.empty ()) { colored_cloud = (new pcl::PointCloud<pcl::PointXYZRGBA>)->makeShared (); srand (static_cast<unsigned int> (time (0))); std::vector<unsigned char> colors; for (size_t i_segment = 0; i_segment < clusters_.size (); i_segment++) { colors.push_back (static_cast<unsigned char> (rand () % 256)); colors.push_back (static_cast<unsigned char> (rand () % 256)); colors.push_back (static_cast<unsigned char> (rand () % 256)); } colored_cloud->width = input_->width; colored_cloud->height = input_->height; colored_cloud->is_dense = input_->is_dense; for (size_t i_point = 0; i_point < input_->points.size (); i_point++) { pcl::PointXYZRGBA point; point.x = *(input_->points[i_point].data); point.y = *(input_->points[i_point].data + 1); point.z = *(input_->points[i_point].data + 2); point.r = 255; point.g = 0; point.b = 0; point.a = 0; colored_cloud->points.push_back (point); } std::vector< pcl::PointIndices >::iterator i_segment; int next_color = 0; for (i_segment = clusters_.begin (); i_segment != clusters_.end (); i_segment++) { std::vector<int>::iterator i_point; for (i_point = i_segment->indices.begin (); i_point != i_segment->indices.end (); i_point++) { int index; index = *i_point; 
colored_cloud->points[index].r = colors[3 * next_color]; colored_cloud->points[index].g = colors[3 * next_color + 1]; colored_cloud->points[index].b = colors[3 * next_color + 2]; } next_color++; } } return (colored_cloud); }
rnn_helpers.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#ifdef _WIN32
#pragma warning(disable : 4267)  // size_t -> smaller integral type conversions
#endif

#include <algorithm>
#include <functional>
#include <future>
#include <string>
#include <vector>

#include "gsl/span"
#include "gsl/gsl_algorithm"

#include "core/common/common.h"
#include "core/common/logging/logging.h"
#include "core/framework/allocator.h"
#include "core/util/math.h"
#include "core/util/math_cpuonly.h"
#include "core/platform/threadpool.h"

namespace onnxruntime {
class Tensor;
class OpKernelContext;

namespace rnn {
namespace detail {

// Direction of sequence processing for an RNN/LSTM/GRU layer.
enum Direction {
  kForward = 0,
  kReverse = 1,
  kBidirectional = 2
};

// Parses the ONNX 'direction' string attribute; throws on any other value.
inline Direction MakeDirection(const std::string& direction) {
  if (direction == "forward") {
    return kForward;
  }
  if (direction == "reverse") {
    return kReverse;
  }
  if (direction == "bidirectional") {
    return kBidirectional;
  }
  ORT_THROW("Invalid 'direction' argument of '", direction,
            "'. Must be one of 'forward', 'reverse', or 'bidirectional'.");
}

/** Allocate a unique_ptr using allocator_, and return a span to the allocated memory so usage is safe
@param allocator IAllocator to use for the allocation.
@param size Allocation size. Number of elements of type TAlloc, or total size if TAlloc is 'void'.
@param unique_ptr unique_ptr that will control the lifetime of the allocated memory.
@param fill If true, fill the allocated memory with fill_value.
@param fill_value Value to use if 'fill' is true.
@returns A span to provide bounds checked access to the allocated memory.
*/
template <typename TAlloc>
gsl::span<TAlloc> Allocate(std::shared_ptr<IAllocator> allocator,
                           size_t size,
                           IAllocatorUniquePtr<TAlloc>& unique_ptr,
                           bool fill = false, TAlloc fill_value = TAlloc{}) {
  unique_ptr = IAllocator::MakeUniquePtr<TAlloc>(allocator, size);
  auto span = gsl::make_span(unique_ptr.get(), size);

  if (fill) {
    // Don't use span.begin(): it would cause a performance issue and stop the compiler optimizing the fill
    std::fill_n(unique_ptr.get(), size, fill_value);
  }

  return span;
}

// validate the common inputs to RNN, LSTM and GRU operators
Status ValidateCommonRnnInputs(const Tensor& X,
                               const Tensor& W,
                               const Tensor& R,
                               const Tensor* B,
                               int WRB_dim_1_multipler,  // multiplier used with hidden_size for W, R and B inputs
                               const Tensor* sequence_lens,
                               const Tensor* initial_h,
                               int64_t num_directions,
                               int64_t hidden_size);

/// Copy an input array repeatedly to an output array
/// @param input_begin Beginning of input
/// @param input_end End of input
/// @param output Output iterator
/// @param repetitions Number of times to repeat copy. Assumes output is sufficiently sized.
/// @returns Position of output iterator after copy is completed
template <typename TInIter, typename TOutIter>
TOutIter RepeatVectorToConstructArray(TInIter input_begin,
                                      TInIter input_end,
                                      TOutIter output,
                                      int64_t repetitions) {
  for (int64_t i = 0; i < repetitions; i++) {
    output = std::copy(input_begin, input_end, output);
  }

  return output;
}

// reverse an LSTM or GRU sequence which has shape [seq_length, batch_size, hidden_size]
// and output to shape [seq_length, num_directions, batch_size, hidden_size]
// Per batch entry: the first seq_len steps are written in reverse order; padding steps
// (seq_len..max_sequence_length) keep their original position.
template <typename T>
void ReverseSequence(gsl::span<const T> inputs,
                     gsl::span<T> inputs_reverse,
                     gsl::span<const int> sequence_lengths,
                     const int max_sequence_length,
                     const int batch_size,
                     const int input_size,
                     const int num_directions) {
  for (int i = 0; i < batch_size; i++) {
    int seq_len = sequence_lengths[i];

#ifdef USE_OPENMP
// Parallel execute the loop.
#pragma omp parallel for
#endif
    for (int j = 0; j < seq_len; j++) {
      gsl::span<const T> src = inputs.subspan(j * batch_size * input_size + i * input_size, input_size);
      gsl::span<T> dest = inputs_reverse.subspan(num_directions * (seq_len - j - 1) * batch_size * input_size + i * input_size, input_size);

      // Use gsl::copy instead of std::copy() to allow compiler to optimize the code
      gsl::copy(src, dest);
    }

#ifdef USE_OPENMP
// Parallel execute the loop.
#pragma omp parallel for
#endif
    for (int j = seq_len; j < max_sequence_length; j++) {
      gsl::span<const T> src = inputs.subspan(j * batch_size * input_size + i * input_size, input_size);
      gsl::span<T> dest = inputs_reverse.subspan(num_directions * j * batch_size * input_size + i * input_size, input_size);

      // Use gsl::copy instead of std::copy() to allow compiler to optimize the code
      gsl::copy(src, dest);
    }
  }
}

// A has size M x K, B has size N x K (transposed), and C has size M x N
// We check that A, B and C are large enough before calling the lower level GEMM implementation
template <typename TSpanAIter, typename TSpanBIter, typename TSpanCIter>
void ComputeGemm(const int M,
                 const int N,
                 const int K,
                 const float alpha,
                 TSpanAIter A,
                 TSpanAIter A_end,
                 const int lda,
                 TSpanBIter B,
                 TSpanBIter B_end,
                 const int ldb,
                 const float beta,
                 TSpanCIter C,
                 TSpanCIter C_end,
                 const int ldc,
                 concurrency::ThreadPool* tp) {
  // validate all the inputs
  // need to use the lda/ldb/ldc strides which should be >= the columns for the span
  ORT_ENFORCE(lda >= K && ldb >= K && ldc >= N);
  ORT_ENFORCE(A + (M * lda - (lda - K)) <= A_end);
  ORT_ENFORCE(B + (N * ldb - (ldb - K)) <= B_end);
  ORT_ENFORCE(C + (M * ldc - (ldc - N)) <= C_end);

  ::onnxruntime::math::GemmEx<float>(
      CblasNoTrans, CblasTrans,
      M, N, K, alpha,
      &*A, lda,
      &*B, ldb, beta,
      &*C, ldc, tp);
}

// helper to convert a span to a raw pointer
// after validating the memory covered by the span supports the size required
template <typename T>
const T* SafeRawConstPointer(typename gsl::span<T>::const_iterator cur,
                             typename gsl::span<T>::const_iterator end,
                             size_t size) {
  ORT_ENFORCE(cur + size <= end);
  return &*cur;
}

// helper to convert a span to a raw pointer
// after validating the memory covered by the span supports the size required
// NOTE(review): unlike SafeRawPointer below, this overload validates 'offset'
// but returns span.data() WITHOUT adding it — callers passing a non-zero offset
// get the span start. Confirm whether this is intentional before changing.
template <typename T>
const T* SafeRawConstPointer(gsl::span<T> span, size_t offset, size_t size) {
  ORT_ENFORCE(offset + size <= size_t(span.size()));
  return span.data();
}

// helper to convert a span to a raw pointer
// after validating the memory covered by the span supports the size required
template <typename T>
T* SafeRawPointer(typename gsl::span<T>::iterator cur,
                  typename gsl::span<T>::iterator end,
                  size_t size) {
  ORT_ENFORCE(cur + size <= end);
  return &*cur;
}

// helper to convert a span to a raw pointer
// after validating the memory covered by the span supports the size required
template <typename T>
T* SafeRawPointer(typename gsl::span<T> span, size_t offset, size_t size) {
  ORT_ENFORCE(offset + size <= size_t(span.size()));
  return span.data() + offset;
}

// Runs lambda(i) for i = 0, step, 2*step, ... < max on the thread pool,
// waits for all tasks, and re-throws the first exception any task raised.
template <typename TLambda>
void ExecuteLambdaInParallel(const std::string& name, TLambda lambda, int max, int step,
                             onnxruntime::concurrency::ThreadPool& ttp,
                             const ::onnxruntime::logging::Logger& logger) {
  // #define NOTHREADS to execute the lambdas directly and in order if you need to do that to debug

#ifdef NOTHREADS
  ORT_UNUSED_PARAMETER(ttp);
  ORT_UNUSED_PARAMETER(logger);

  for (int i = 0; i < max; i += step) {
    (void)name;
    std::bind(lambda, i)();
  }
#else

  ORT_UNUSED_PARAMETER(name);
  ORT_UNUSED_PARAMETER(logger);

  // ORT_ENFORCE may and does throw at times from within the tasks that run
  // on a thread-pool. Without propagating exceptions the process exits silently
  // which will make diagnosing bugs more difficult.

  // \! UGLY
  // We have a problem here with the current thread-pool is that it takes std::function
  // by value and copies it more than once (even though it is movable).
  //
  // To report status and exceptions properly it's better to use
  // futures and promises but they are not copyable, so we can't come up with a functor
  // with a promise member and we are downgrading to C++11 where we can't have captures that moved in.
  //
  // At the same time promises MUST live in the child thread so if we throw from the main thread
  // we don't destroy any promises that are on the main thread stack which children threads may still be using.
  //
  // The only solution with the current Eigen that comes to mind is to have shared_ptr to with std::promise.
  //
  const int total_tasks = max / (step > 0 ? step : 1) + (max % step > 0 ? 1 : 0);
  std::vector<std::future<void> > futures;
  futures.reserve(total_tasks);

  for (int i = 0, t = 0; i < max; i += step, ++t) {
    auto p_ptr = std::make_shared<std::promise<void> >();
    futures.push_back(p_ptr->get_future());
    ttp.Schedule([p_ptr, lambda, i]() {
      try {
        lambda(i);
        p_ptr->set_value();
      } catch (...) {
        p_ptr->set_exception(std::current_exception());
      }
    });
  }

  // We'd like to wait until all of the tasks have finished
  // even though one or more have already thrown. We will store
  // the first exception and then will re-throw at the end.
  std::exception_ptr pending_exception;

  for (auto& fut : futures) {
    try {
      // get() will re-throw any exceptions
      // the running task may throw
      fut.get();
    } catch (...) {
      if (!pending_exception) {
        pending_exception = std::current_exception();
      }
    }
  }

  if (pending_exception) {
    std::rethrow_exception(pending_exception);
  }

#endif
}

// Debug helper: prints a row x col float matrix (implementation elsewhere).
void DumpMatrixImpl(const std::string& name, const float* src, int row, int col,
                    int offset = 0, int col_width = -1);

// Helper class to wrap the processing of the activation funcs and any alpha/beta values.
// The alpha/beta values are consumed in the order of the activation funcs. once they run out
// defaults will be used as needed.
// The Entries property contains the normalized function names and the alpha/beta value to use.
// Holds the parsed list of activation functions for an RNN operator, together
// with the alpha/beta values supplied for each one (see comment above: the
// alpha/beta values are consumed in order; defaults fill in once they run out).
class ActivationFuncs {
 public:
  struct Entry {
    const std::string name;   // normalized activation function name
    const float alpha;
    const float beta;
  };

  ActivationFuncs() = default;

  // Builds the entry list from activation names plus the alpha/beta values,
  // consumed in the order of `funcs`.
  ActivationFuncs(const std::vector<std::string>& funcs,
                  const std::vector<float>& alphas,
                  const std::vector<float>& betas);

  const std::vector<Entry>& Entries() const { return entries_; }

 private:
  std::vector<Entry> entries_;
};

// Low-level scalar kernels used by the CPU RNN/LSTM/GRU implementations.
namespace deepcpu {

using AddBiasIntoFuncPtr = void (*)(const float*, float*, const int);
using ClipWithBiasFuncPtr = void (*)(float, const float*, float*, const int);
using ActivationFuncPtr = void (*)(float*, int, float, float);
using ActivationFuncBPtr = void (*)(const float*, float*, int, float, float);
using LstmMergeGatesFuncPtr = void (*)(const float*, float*, const float*, float*, int, float, float);
using GruResetGateFuncPtr = void (*)(const float*, float*, float*, int, float, float);
using GruOutputGateFuncPtr = void (*)(float*, const float*, const float*, float*, int, float, float);

// Lookup helpers mapping an activation name to the matching kernel.
ActivationFuncPtr ActivationFuncByName(const std::string& func);
LstmMergeGatesFuncPtr LstmMergeGatesFuncByName(const std::string& func);
GruResetGateFuncPtr GruResetGateFuncByName(const std::string& func);
GruOutputGateFuncPtr GruOutputGateFuncByName(const std::string& func);

// Bias / clipping kernels operating on `c` contiguous floats.
void add_bias_into_ignore(const float* ignored, const float* pd, int c);
void add_bias_into(const float* ps, float* pd, int c);
void clip(float b, float* pd, int c);
void clip_add_bias(float b, const float* pb, float* pd, int c);
void clip_ignore_bias(float b, const float* pb, float* pd, int c);

// Fused "merge" activations used by the gate computations.
void sigmoid_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta);
void tanh_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta);
void relu_m(const float* ps1, const float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta);
void sigmoid_exact_m(const float* ps1, const float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta);
void tanh_exact_m(const float* ps1, const float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta);

// In-place activations over `c` floats.
void sigmoid(float* pd, int c, float alpha, float beta);
void tanh(float* pd, int c, float alpha, float beta);
void relu(float* pd, int c, float alpha, float beta);
void sigmoid_exact(float* pd, int c, float alpha, float beta);
void tanh_exact(float* pd, int c, float alpha, float beta);

// LSTM cell-state update from the previous state and the i/f/g gates.
void merge_lstm_gates_to_memory(const float* pprev, const float* pi, const float* pf, const float* pg,
                                float* pcurr, int c);

// GRU gate kernels, one variant per supported activation.
void gru_reset_gate_tanh(const float* ps1, float* ps2, float* pd, int c, float alpha, float beta);
void gru_reset_gate_sigmoid(const float* ps1, float* ps2, float* pd, int c, float alpha, float beta);
void gru_reset_gate_relu(const float* ps1, const float* ps2, float* pd, int c, float alpha, float beta);
void gru_output_gate_tanh(float* ph, const float* pz, const float* ps, float* po, int c, float alpha, float beta);
void gru_output_gate_sigmoid(float* ph, const float* pz, const float* ps, float* po, int c, float alpha, float beta);
void gru_output_gate_relu(const float* ph, const float* pz, const float* ps, float* po, int c, float alpha, float beta);

// NOTE: accumulates into dest (`+=`); it does NOT overwrite existing values.
inline void elementwise_product(const float* op1, const float* op2, float* dest, int size) {
  for (int i = 0; i < size; i++)
    dest[i] += op1[i] * op2[i];
}

// NOTE: accumulates into dest (`+=`).
inline void elementwise_sum1(const float* src, float* dest, int size) {
  for (int i = 0; i < size; i++)
    dest[i] += src[i];
}

// NOTE: accumulates into dest (`+=`).
inline void elementwise_sum2(const float* src1, const float* src2, float* dest, int size) {
  for (int i = 0; i < size; i++)
    dest[i] += src1[i] + src2[i];
}

}  // namespace deepcpu
}  // namespace detail
}  // namespace rnn
}  // namespace onnxruntime
/* onyx_if.c — beginning of the VP8 (WebM/libvpx) encoder source that follows */
/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "onyxc_int.h"
#include "onyx_int.h"
#include "systemdependent.h"
#include "quantize.h"
#include "alloccommon.h"
#include "mcomp.h"
#include "firstpass.h"
#include "psnr.h"
#include "vpx_scale/vpxscale.h"
#include "extend.h"
#include "ratectrl.h"
#include "quant_common.h"
#include "segmentation.h"
#include "g_common.h"
#include "vpx_scale/yv12extend.h"
#include "postproc.h"
#include "vpx_mem/vpx_mem.h"
#include "swapyv12buffer.h"
#include "threading.h"
#include "vpx_ports/vpx_timer.h"
#include "vpxerrors.h"
#include "temporal_filter.h"
#if ARCH_ARM
#include "vpx_ports/arm.h"
#endif

#include <math.h>
#include <stdio.h>
#include <limits.h>

// RTCD: run-time CPU detection. When enabled, the per-codec function tables
// are consulted; otherwise the statically configured functions are used.
#if CONFIG_RUNTIME_CPU_DETECT
#define IF_RTCD(x) (x)
#define RTCD(x) &cpi->common.rtcd.x
#else
#define IF_RTCD(x) NULL
#define RTCD(x) NULL
#endif

// Forward declarations for routines defined in other translation units.
extern void vp8cx_init_mv_bits_sadcost();
extern void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
extern void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val);
extern void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);

extern void vp8_init_loop_filter(VP8_COMMON *cm);
extern void vp8_loop_filter_frame(VP8_COMMON *cm, MACROBLOCKD *mbd, int filt_val);
extern void vp8_loop_filter_frame_yonly(VP8_COMMON *cm, MACROBLOCKD *mbd, int filt_val, int sharpness_lvl);
extern void vp8_dmachine_specific_config(VP8_COMP *cpi);
extern void vp8_cmachine_specific_config(VP8_COMP *cpi);
extern void vp8_calc_auto_iframe_target_size(VP8_COMP *cpi);
extern void vp8_deblock_frame(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *post, int filt_lvl, int low_var_thresh, int flag);
extern void print_parms(VP8_CONFIG *ocf, char *filenam);
extern unsigned int vp8_get_processor_freq();
extern void print_tree_update_probs();
extern void vp8cx_create_encoder_threads(VP8_COMP *cpi);
extern void vp8cx_remove_encoder_threads(VP8_COMP *cpi);
#if HAVE_ARMV7
extern void vp8_yv12_copy_frame_func_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);
extern void vp8_yv12_copy_src_frame_func_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);
#endif

int vp8_estimate_entropy_savings(VP8_COMP *cpi);
int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd);
int vp8_calc_low_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd);

static void set_default_lf_deltas(VP8_COMP *cpi);

extern const int vp8_gf_interval_table[101];

#if CONFIG_PSNR
#include "math.h"

extern double vp8_calc_ssim
(
    YV12_BUFFER_CONFIG *source,
    YV12_BUFFER_CONFIG *dest,
    int lumamask,
    double *weight
);

extern double vp8_calc_ssimg
(
    YV12_BUFFER_CONFIG *source,
    YV12_BUFFER_CONFIG *dest,
    double *ssim_y,
    double *ssim_u,
    double *ssim_v
);
#endif

// Optional debug / statistics outputs, compiled in only when the matching
// macro is defined.
#ifdef OUTPUT_YUV_SRC
FILE *yuv_file;
#endif

#if 0
FILE *framepsnr;
FILE *kf_list;
FILE *keyfile;
#endif

#if 0
extern int skip_true_count;
extern int skip_false_count;
#endif

#ifdef ENTROPY_STATS
extern int intra_mode_stats[10][10][10];
#endif

#ifdef SPEEDSTATS
unsigned int frames_at_speed[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
unsigned int tot_pm = 0;
unsigned int cnt_pm = 0;
unsigned int tot_ef = 0;
unsigned int cnt_ef = 0;
#endif

#ifdef MODE_STATS
extern unsigned __int64 Sectionbits[50];
extern int y_modes[5] ;
extern int uv_modes[4] ;
extern int b_modes[10] ;
extern int inter_y_modes[10] ;
extern int inter_uv_modes[4] ;
extern unsigned int inter_b_modes[15];
#endif

// Function pointers installed by the machine-specific configuration.
extern void (*vp8_short_fdct4x4)(short *input, short *output, int pitch);
extern void (*vp8_short_fdct8x4)(short *input, short *output, int
pitch);

extern const int vp8_bits_per_mb[2][QINDEX_RANGE];

extern const int qrounding_factors[129];
extern const int qzbin_factors[129];
extern void vp8cx_init_quantizer(VP8_COMP *cpi);
extern const int vp8cx_base_skip_false_prob[128];

// Tables relating active max Q to active min Q.
// Indexed by Q index (0..QINDEX_RANGE-1); one table per frame type /
// motion level combination. Currently-unused variants are kept commented out.
static const int kf_low_motion_minq[QINDEX_RANGE] =
{
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4,
    5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 10,10,
    11,11,12,12,13,13,14,14,15,15,16,16,17,17,18,18,
    19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,
    27,27,28,28,29,29,30,30,31,32,33,34,35,36,37,38,
};
static const int kf_high_motion_minq[QINDEX_RANGE] =
{
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
    2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5,
    6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10,10,
    11,11,12,12,13,13,14,14,15,15,16,16,17,17,18,18,
    19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,
    27,27,28,28,29,29,30,30,31,31,32,32,33,33,34,34,
    35,35,36,36,37,38,39,40,41,42,43,44,45,46,47,48,
};
/*static const int kf_minq[QINDEX_RANGE] =
{
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4,
    5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10,10,11,11,12,12,13,13,14,14,
    15,15,16,16,17,17,18,18,19,19,20,20,21,21,22,22,
    23,23,24,24,25,25,26,26,27,27,28,28,29,29,30,30,
    31,31,32,32,33,33,34,34,35,35,36,36,37,37,38,38
};*/
static const int gf_low_motion_minq[QINDEX_RANGE] =
{
    0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2,
    3,3,3,3,4,4,4,4,5,5,5,5,6,6,6,6,
    7,7,7,7,8,8,8,8,9,9,9,9,10,10,10,10,
    11,11,12,12,13,13,14,14,15,15,16,16,17,17,18,18,
    19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,
    27,27,28,28,29,29,30,30,31,31,32,32,33,33,34,34,
    35,35,36,36,37,37,38,38,39,39,40,40,41,41,42,42,
    43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58
};
static const int gf_mid_motion_minq[QINDEX_RANGE] =
{
    0,0,0,0,1,1,1,1,1,1,2,2,3,3,3,4,
    4,4,5,5,5,6,6,6,7,7,7,8,8,8,9,9,
    9,10,10,10,10,11,11,11,12,12,12,12,13,13,13,14,
    14,14,15,15,16,16,17,17,18,18,19,19,20,20,21,21,
    22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,
    30,30,31,31,32,32,33,33,34,34,35,35,36,36,37,37,
    38,39,39,40,40,41,41,42,42,43,43,44,45,46,47,48,
    49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,
};
static const int gf_high_motion_minq[QINDEX_RANGE] =
{
    0,0,0,0,1,1,1,1,1,2,2,2,3,3,3,4,
    4,4,5,5,5,6,6,6,7,7,7,8,8,8,9,9,
    9,10,10,10,11,11,12,12,13,13,14,14,15,15,16,16,
    17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,
    25,25,26,26,27,27,28,28,29,29,30,30,31,31,32,32,
    33,33,34,34,35,35,36,36,37,37,38,38,39,39,40,40,
    41,41,42,42,43,44,45,46,47,48,49,50,51,52,53,54,
    55,56,57,58,59,60,62,64,66,68,70,72,74,76,78,80,
};
/*static const int gf_arf_minq[QINDEX_RANGE] =
{
    0,0,0,0,1,1,1,1,1,1,2,2,3,3,3,4,
    4,4,5,5,5,6,6,6,7,7,7,8,8,8,9,9,
    9,10,10,10,11,11,11,12,12,12,13,13,13,14,14,14,
    15,15,16,16,17,17,18,18,19,19,20,20,21,21,22,22,
    23,23,24,24,25,25,26,26,27,27,28,28,29,29,30,30,
    31,31,32,32,33,33,34,34,35,35,36,36,37,37,38,39,
    39,40,40,41,41,42,42,43,43,44,45,46,47,48,49,50,
    51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66
};*/
static const int inter_minq[QINDEX_RANGE] =
{
    0,0,0,0,1,1,2,3,3,4,4,5,6,6,7,7,
    8,8,9,9,10,11,11,12,12,13,13,14,14,15,15,16,
    16,17,17,17,18,18,19,19,20,20,21,21,22,22,22,23,
    23,24,24,24,25,25,26,27,28,28,29,30,31,32,33,34,
    35,35,36,37,38,39,39,40,41,42,43,43,44,45,46,47,
    47,48,49,49,51,52,53,54,54,55,56,56,57,57,58,58,
    59,59,60,61,61,62,62,63,64,64,65,66,67,67,68,69,
    69,70,71,71,72,73,74,75,76,76,77,78,79,80,81,81,
};

// One-time global initialisation of encoder-wide shared state (scaling,
// common data, tokenizer, MV bit-cost tables).
// NOTE(review): the static init_done flag is not protected by any lock —
// assumes the first call happens before any concurrent use; confirm callers.
void vp8_initialize()
{
    static int init_done = 0;

    if (!init_done)
    {
        vp8_scale_machine_specific_config();
        vp8_initialize_common();
        //vp8_dmachine_specific_config();
        vp8_tokenize_initialize();

        vp8cx_init_mv_bits_sadcost();
        init_done = 1;
    }
}
#ifdef PACKET_TESTING
extern FILE
*vpxlogc; #endif static void setup_features(VP8_COMP *cpi) { // Set up default state for MB feature flags cpi->mb.e_mbd.segmentation_enabled = 0; cpi->mb.e_mbd.update_mb_segmentation_map = 0; cpi->mb.e_mbd.update_mb_segmentation_data = 0; vpx_memset(cpi->mb.e_mbd.mb_segment_tree_probs, 255, sizeof(cpi->mb.e_mbd.mb_segment_tree_probs)); vpx_memset(cpi->mb.e_mbd.segment_feature_data, 0, sizeof(cpi->mb.e_mbd.segment_feature_data)); cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0; cpi->mb.e_mbd.mode_ref_lf_delta_update = 0; vpx_memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas)); vpx_memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas)); vpx_memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas)); vpx_memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas)); set_default_lf_deltas(cpi); } void vp8_dealloc_compressor_data(VP8_COMP *cpi) { // Delete sementation map if (cpi->segmentation_map != 0) vpx_free(cpi->segmentation_map); cpi->segmentation_map = 0; if (cpi->active_map != 0) vpx_free(cpi->active_map); cpi->active_map = 0; // Delete first pass motion map if (cpi->fp_motion_map != 0) vpx_free(cpi->fp_motion_map); cpi->fp_motion_map = 0; vp8_de_alloc_frame_buffers(&cpi->common); vp8_yv12_de_alloc_frame_buffer(&cpi->last_frame_uf); vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source); #if VP8_TEMPORAL_ALT_REF vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer.source_buffer); #endif { int i; for (i = 0; i < MAX_LAG_BUFFERS; i++) vp8_yv12_de_alloc_frame_buffer(&cpi->src_buffer[i].source_buffer); cpi->source_buffer_count = 0; } vpx_free(cpi->tok); cpi->tok = 0; // Structure used to minitor GF useage if (cpi->gf_active_flags != 0) vpx_free(cpi->gf_active_flags); cpi->gf_active_flags = 0; if(cpi->mb.pip) vpx_free(cpi->mb.pip); cpi->mb.pip = 0; vpx_free(cpi->total_stats); vpx_free(cpi->this_frame_stats); } static void enable_segmentation(VP8_PTR ptr) { VP8_COMP *cpi = (VP8_COMP 
*)(ptr); // Set the appropriate feature bit cpi->mb.e_mbd.segmentation_enabled = 1; cpi->mb.e_mbd.update_mb_segmentation_map = 1; cpi->mb.e_mbd.update_mb_segmentation_data = 1; } static void disable_segmentation(VP8_PTR ptr) { VP8_COMP *cpi = (VP8_COMP *)(ptr); // Clear the appropriate feature bit cpi->mb.e_mbd.segmentation_enabled = 0; } // Valid values for a segment are 0 to 3 // Segmentation map is arrange as [Rows][Columns] static void set_segmentation_map(VP8_PTR ptr, unsigned char *segmentation_map) { VP8_COMP *cpi = (VP8_COMP *)(ptr); // Copy in the new segmentation map vpx_memcpy(cpi->segmentation_map, segmentation_map, (cpi->common.mb_rows * cpi->common.mb_cols)); // Signal that the map should be updated. cpi->mb.e_mbd.update_mb_segmentation_map = 1; cpi->mb.e_mbd.update_mb_segmentation_data = 1; } // The values given for each segment can be either deltas (from the default value chosen for the frame) or absolute values. // // Valid range for abs values is (0-127 for MB_LVL_ALT_Q) , (0-63 for SEGMENT_ALT_LF) // Valid range for delta values are (+/-127 for MB_LVL_ALT_Q) , (+/-63 for SEGMENT_ALT_LF) // // abs_delta = SEGMENT_DELTADATA (deltas) abs_delta = SEGMENT_ABSDATA (use the absolute values given). // // static void set_segment_data(VP8_PTR ptr, signed char *feature_data, unsigned char abs_delta) { VP8_COMP *cpi = (VP8_COMP *)(ptr); cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta; vpx_memcpy(cpi->segment_feature_data, feature_data, sizeof(cpi->segment_feature_data)); } static void segmentation_test_function(VP8_PTR ptr) { VP8_COMP *cpi = (VP8_COMP *)(ptr); unsigned char *seg_map; signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS]; // Create a temporary map for segmentation data. 
    CHECK_MEM_ERROR(seg_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));

    // MB loop to set local segmentation map
    /*for ( i = 0; i < cpi->common.mb_rows; i++ )
    {
        for ( j = 0; j < cpi->common.mb_cols; j++ )
        {
            //seg_map[(i*cpi->common.mb_cols) + j] = (j % 2) + ((i%2)* 2);
            //if ( j < cpi->common.mb_cols/2 )

            // Segment 1 around the edge else 0
            if ( (i == 0) || (j == 0) || (i == (cpi->common.mb_rows-1)) || (j == (cpi->common.mb_cols-1)) )
                seg_map[(i*cpi->common.mb_cols) + j] = 1;
            //else if ( (i < 2) || (j < 2) || (i > (cpi->common.mb_rows-3)) || (j > (cpi->common.mb_cols-3)) )
            //  seg_map[(i*cpi->common.mb_cols) + j] = 2;
            //else if ( (i < 5) || (j < 5) || (i > (cpi->common.mb_rows-6)) || (j > (cpi->common.mb_cols-6)) )
            //  seg_map[(i*cpi->common.mb_cols) + j] = 3;
            else
                seg_map[(i*cpi->common.mb_cols) + j] = 0;
        }
    }*/

    // Set the segmentation Map
    set_segmentation_map(ptr, seg_map);

    // Activate segmentation.
    enable_segmentation(ptr);

    // Set up the quant segment data
    feature_data[MB_LVL_ALT_Q][0] = 0;
    feature_data[MB_LVL_ALT_Q][1] = 4;
    feature_data[MB_LVL_ALT_Q][2] = 0;
    feature_data[MB_LVL_ALT_Q][3] = 0;

    // Set up the loop segment data
    feature_data[MB_LVL_ALT_LF][0] = 0;
    feature_data[MB_LVL_ALT_LF][1] = 0;
    feature_data[MB_LVL_ALT_LF][2] = 0;
    feature_data[MB_LVL_ALT_LF][3] = 0;

    // Initialise the feature data structure
    // SEGMENT_DELTADATA    0, SEGMENT_ABSDATA      1
    set_segment_data(ptr, &feature_data[0][0], SEGMENT_DELTADATA);

    // Delete segmentation map
    if (seg_map != 0)
        vpx_free(seg_map);

    seg_map = 0;
}

// A simple function to cyclically refresh the background at a lower Q.
// Marks a bounded number of candidate (unrefreshed) MBs per frame as
// segment 1, which receives a reduced Q (cyclic_refresh_q - Q) and an
// lf_adjustment loop-filter delta.
static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment)
{
    unsigned char *seg_map;
    signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
    int i;
    int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
    int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;

    // Create a temporary map for segmentation data.
    CHECK_MEM_ERROR(seg_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));

    cpi->cyclic_refresh_q = Q;

    // Find the highest Q below the frame Q whose per-MB bit cost is at least
    // the scaled cost of the frame Q; that becomes the refresh segment's Q.
    for (i = Q; i > 0; i--)
    {
        if (vp8_bits_per_mb[cpi->common.frame_type][i] >= ((vp8_bits_per_mb[cpi->common.frame_type][Q]*(Q + 128)) / 64))
            //if ( vp8_bits_per_mb[cpi->common.frame_type][i] >= ((vp8_bits_per_mb[cpi->common.frame_type][Q]*((2*Q)+96))/64) )
        {
            break;
        }
    }

    cpi->cyclic_refresh_q = i;

    // Only update for inter frames
    if (cpi->common.frame_type != KEY_FRAME)
    {
        // Cycle through the macro_block rows
        // MB loop to set local segmentation map
        for (i = cpi->cyclic_refresh_mode_index; i < mbs_in_frame; i++)
        {
            // If the MB is as a candidate for clean up then mark it for possible boost/refresh (segment 1)
            // The segment id may get reset to 0 later if the MB gets coded anything other than last frame 0,0
            // as only (last frame 0,0) MBs are eligable for refresh : that is to say Mbs likely to be background blocks.
            if (cpi->cyclic_refresh_map[i] == 0)
            {
                seg_map[i] = 1;
            }
            else
            {
                seg_map[i] = 0;

                // Skip blocks that have been refreshed recently anyway.
                if (cpi->cyclic_refresh_map[i] < 0)
                    //cpi->cyclic_refresh_map[i] = cpi->cyclic_refresh_map[i] / 16;
                    cpi->cyclic_refresh_map[i]++;
            }

            // Stop once the per-frame refresh budget is exhausted.
            if (block_count > 0)
                block_count--;
            else
                break;
        }

        // Remember where to resume next frame; if we have gone through the
        // whole frame, reset to the start.
        cpi->cyclic_refresh_mode_index = i;

        if (cpi->cyclic_refresh_mode_index >= mbs_in_frame)
            cpi->cyclic_refresh_mode_index = 0;
    }

    // Set the segmentation Map
    set_segmentation_map((VP8_PTR)cpi, seg_map);

    // Activate segmentation.
    enable_segmentation((VP8_PTR)cpi);

    // Set up the quant segment data: segment 1 gets the (negative) delta down
    // to the refresh Q computed above.
    feature_data[MB_LVL_ALT_Q][0] = 0;
    feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q);
    feature_data[MB_LVL_ALT_Q][2] = 0;
    feature_data[MB_LVL_ALT_Q][3] = 0;

    // Set up the loop segment data
    feature_data[MB_LVL_ALT_LF][0] = 0;
    feature_data[MB_LVL_ALT_LF][1] = lf_adjustment;
    feature_data[MB_LVL_ALT_LF][2] = 0;
    feature_data[MB_LVL_ALT_LF][3] = 0;

    // Initialise the feature data structure
    // SEGMENT_DELTADATA    0, SEGMENT_ABSDATA      1
    set_segment_data((VP8_PTR)cpi, &feature_data[0][0], SEGMENT_DELTADATA);

    // Delete segmentation map
    if (seg_map != 0)
        vpx_free(seg_map);

    seg_map = 0;
}

// Enable mode/ref-frame loop-filter deltas and install the default values.
static void set_default_lf_deltas(VP8_COMP *cpi)
{
    cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
    cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;

    vpx_memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
    vpx_memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));

    // Test of ref frame deltas
    cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
    cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
    cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
    cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;

    cpi->mb.e_mbd.mode_lf_deltas[0] = 4;               // BPRED
    cpi->mb.e_mbd.mode_lf_deltas[1] = -2;              // Zero
    cpi->mb.e_mbd.mode_lf_deltas[2] = 2;               // New mv
    cpi->mb.e_mbd.mode_lf_deltas[3] = 4;               // Split mv
}

// Configure the encoder's speed/quality trade-off features from the current
// compressor mode and speed setting. (Definition continues past this chunk.)
void vp8_set_speed_features(VP8_COMP *cpi)
{
    SPEED_FEATURES *sf = &cpi->sf;
    int Mode = cpi->compressor_speed;
    int Speed = cpi->Speed;
    int i;
    VP8_COMMON *cm = &cpi->common;

    // Initialise default mode frequency sampling variables
    for (i = 0; i < MAX_MODES; i ++)
    {
        cpi->mode_check_freq[i] = 0;
        cpi->mode_test_hit_counts[i] = 0;
        cpi->mode_chosen_counts[i] = 0;
    }

    cpi->mbs_tested_so_far = 0;

    // best quality
    sf->RD = 1;
    sf->search_method = NSTEP;
    sf->improved_quant = 1;
    sf->improved_dct = 1;
    sf->auto_filter = 1;
    sf->recode_loop = 1;
    sf->quarter_pixel_search = 1;
    sf->half_pixel_search = 1;
    sf->full_freq[0] = 7;
    sf->full_freq[1] = 7;
sf->min_fs_radius = 8; sf->max_fs_radius = 32; sf->iterative_sub_pixel = 1; sf->optimize_coefficients = 1; sf->first_step = 0; sf->max_step_search_steps = MAX_MVSEARCH_STEPS; cpi->do_full[0] = 0; cpi->do_full[1] = 0; // default thresholds to 0 for (i = 0; i < MAX_MODES; i++) sf->thresh_mult[i] = 0; switch (Mode) { #if !(CONFIG_REALTIME_ONLY) case 0: // best quality mode sf->thresh_mult[THR_ZEROMV ] = 0; sf->thresh_mult[THR_ZEROG ] = 0; sf->thresh_mult[THR_ZEROA ] = 0; sf->thresh_mult[THR_NEARESTMV] = 0; sf->thresh_mult[THR_NEARESTG ] = 0; sf->thresh_mult[THR_NEARESTA ] = 0; sf->thresh_mult[THR_NEARMV ] = 0; sf->thresh_mult[THR_NEARG ] = 0; sf->thresh_mult[THR_NEARA ] = 0; sf->thresh_mult[THR_DC ] = 0; sf->thresh_mult[THR_V_PRED ] = 1000; sf->thresh_mult[THR_H_PRED ] = 1000; sf->thresh_mult[THR_B_PRED ] = 2000; sf->thresh_mult[THR_TM ] = 1000; sf->thresh_mult[THR_NEWMV ] = 1000; sf->thresh_mult[THR_NEWG ] = 1000; sf->thresh_mult[THR_NEWA ] = 1000; sf->thresh_mult[THR_SPLITMV ] = 2500; sf->thresh_mult[THR_SPLITG ] = 5000; sf->thresh_mult[THR_SPLITA ] = 5000; sf->full_freq[0] = 7; sf->full_freq[1] = 15; sf->first_step = 0; sf->max_step_search_steps = MAX_MVSEARCH_STEPS; if (!(cpi->ref_frame_flags & VP8_LAST_FLAG)) { sf->thresh_mult[THR_NEWMV ] = INT_MAX; sf->thresh_mult[THR_NEARESTMV] = INT_MAX; sf->thresh_mult[THR_ZEROMV ] = INT_MAX; sf->thresh_mult[THR_NEARMV ] = INT_MAX; sf->thresh_mult[THR_SPLITMV ] = INT_MAX; } if (!(cpi->ref_frame_flags & VP8_GOLD_FLAG)) { sf->thresh_mult[THR_NEARESTG ] = INT_MAX; sf->thresh_mult[THR_ZEROG ] = INT_MAX; sf->thresh_mult[THR_NEARG ] = INT_MAX; sf->thresh_mult[THR_NEWG ] = INT_MAX; sf->thresh_mult[THR_SPLITG ] = INT_MAX; } if (!(cpi->ref_frame_flags & VP8_ALT_FLAG)) { sf->thresh_mult[THR_NEARESTA ] = INT_MAX; sf->thresh_mult[THR_ZEROA ] = INT_MAX; sf->thresh_mult[THR_NEARA ] = INT_MAX; sf->thresh_mult[THR_NEWA ] = INT_MAX; sf->thresh_mult[THR_SPLITA ] = INT_MAX; } break; case 1: case 3: sf->thresh_mult[THR_NEARESTMV] = 0; 
sf->thresh_mult[THR_ZEROMV ] = 0; sf->thresh_mult[THR_DC ] = 0; sf->thresh_mult[THR_NEARMV ] = 0; sf->thresh_mult[THR_V_PRED ] = 1000; sf->thresh_mult[THR_H_PRED ] = 1000; sf->thresh_mult[THR_B_PRED ] = 2500; sf->thresh_mult[THR_TM ] = 1000; sf->thresh_mult[THR_NEARESTG ] = 1000; sf->thresh_mult[THR_NEARESTA ] = 1000; sf->thresh_mult[THR_ZEROG ] = 1000; sf->thresh_mult[THR_ZEROA ] = 1000; sf->thresh_mult[THR_NEARG ] = 1000; sf->thresh_mult[THR_NEARA ] = 1000; sf->thresh_mult[THR_NEWMV ] = 1500; sf->thresh_mult[THR_NEWG ] = 1500; sf->thresh_mult[THR_NEWA ] = 1500; sf->thresh_mult[THR_SPLITMV ] = 5000; sf->thresh_mult[THR_SPLITG ] = 10000; sf->thresh_mult[THR_SPLITA ] = 10000; sf->full_freq[0] = 15; sf->full_freq[1] = 31; sf->first_step = 0; sf->max_step_search_steps = MAX_MVSEARCH_STEPS; if (!(cpi->ref_frame_flags & VP8_LAST_FLAG)) { sf->thresh_mult[THR_NEWMV ] = INT_MAX; sf->thresh_mult[THR_NEARESTMV] = INT_MAX; sf->thresh_mult[THR_ZEROMV ] = INT_MAX; sf->thresh_mult[THR_NEARMV ] = INT_MAX; sf->thresh_mult[THR_SPLITMV ] = INT_MAX; } if (!(cpi->ref_frame_flags & VP8_GOLD_FLAG)) { sf->thresh_mult[THR_NEARESTG ] = INT_MAX; sf->thresh_mult[THR_ZEROG ] = INT_MAX; sf->thresh_mult[THR_NEARG ] = INT_MAX; sf->thresh_mult[THR_NEWG ] = INT_MAX; sf->thresh_mult[THR_SPLITG ] = INT_MAX; } if (!(cpi->ref_frame_flags & VP8_ALT_FLAG)) { sf->thresh_mult[THR_NEARESTA ] = INT_MAX; sf->thresh_mult[THR_ZEROA ] = INT_MAX; sf->thresh_mult[THR_NEARA ] = INT_MAX; sf->thresh_mult[THR_NEWA ] = INT_MAX; sf->thresh_mult[THR_SPLITA ] = INT_MAX; } if (Speed > 0) { // Disable coefficient optimization above speed 0 sf->optimize_coefficients = 0; cpi->mode_check_freq[THR_SPLITG] = 4; cpi->mode_check_freq[THR_SPLITA] = 4; cpi->mode_check_freq[THR_SPLITMV] = 2; sf->thresh_mult[THR_TM ] = 1500; sf->thresh_mult[THR_V_PRED ] = 1500; sf->thresh_mult[THR_H_PRED ] = 1500; sf->thresh_mult[THR_B_PRED ] = 5000; if (cpi->ref_frame_flags & VP8_LAST_FLAG) { sf->thresh_mult[THR_NEWMV ] = 2000; 
sf->thresh_mult[THR_SPLITMV ] = 10000; } if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { sf->thresh_mult[THR_NEARESTG ] = 1500; sf->thresh_mult[THR_ZEROG ] = 1500; sf->thresh_mult[THR_NEARG ] = 1500; sf->thresh_mult[THR_NEWG ] = 2000; sf->thresh_mult[THR_SPLITG ] = 20000; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { sf->thresh_mult[THR_NEARESTA ] = 1500; sf->thresh_mult[THR_ZEROA ] = 1500; sf->thresh_mult[THR_NEARA ] = 1500; sf->thresh_mult[THR_NEWA ] = 2000; sf->thresh_mult[THR_SPLITA ] = 20000; } sf->improved_quant = 0; sf->improved_dct = 0; sf->first_step = 1; sf->max_step_search_steps = MAX_MVSEARCH_STEPS; } if (Speed > 1) { cpi->mode_check_freq[THR_SPLITG] = 15; cpi->mode_check_freq[THR_SPLITA] = 15; cpi->mode_check_freq[THR_SPLITMV] = 7; sf->thresh_mult[THR_TM ] = 2000; sf->thresh_mult[THR_V_PRED ] = 2000; sf->thresh_mult[THR_H_PRED ] = 2000; sf->thresh_mult[THR_B_PRED ] = 7500; if (cpi->ref_frame_flags & VP8_LAST_FLAG) { sf->thresh_mult[THR_NEWMV ] = 2000; sf->thresh_mult[THR_SPLITMV ] = 25000; } if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { sf->thresh_mult[THR_NEARESTG ] = 2000; sf->thresh_mult[THR_ZEROG ] = 2000; sf->thresh_mult[THR_NEARG ] = 2000; sf->thresh_mult[THR_NEWG ] = 2500; sf->thresh_mult[THR_SPLITG ] = 50000; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { sf->thresh_mult[THR_NEARESTA ] = 2000; sf->thresh_mult[THR_ZEROA ] = 2000; sf->thresh_mult[THR_NEARA ] = 2000; sf->thresh_mult[THR_NEWA ] = 2500; sf->thresh_mult[THR_SPLITA ] = 50000; } // Only do recode loop on key frames and golden frames sf->recode_loop = 2; sf->full_freq[0] = 31; sf->full_freq[1] = 63; } if (Speed > 2) { sf->auto_filter = 0; // Faster selection of loop filter cpi->mode_check_freq[THR_V_PRED] = 2; cpi->mode_check_freq[THR_H_PRED] = 2; cpi->mode_check_freq[THR_B_PRED] = 2; if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { cpi->mode_check_freq[THR_NEARG] = 2; cpi->mode_check_freq[THR_NEWG] = 4; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { cpi->mode_check_freq[THR_NEARA] = 2; 
cpi->mode_check_freq[THR_NEWA] = 4; } sf->thresh_mult[THR_SPLITA ] = INT_MAX; sf->thresh_mult[THR_SPLITG ] = INT_MAX; sf->thresh_mult[THR_SPLITMV ] = INT_MAX; sf->full_freq[0] = 63; sf->full_freq[1] = 127; } if (Speed > 3) { cpi->mode_check_freq[THR_V_PRED] = 0; cpi->mode_check_freq[THR_H_PRED] = 0; cpi->mode_check_freq[THR_B_PRED] = 0; cpi->mode_check_freq[THR_NEARG] = 0; cpi->mode_check_freq[THR_NEWG] = 0; cpi->mode_check_freq[THR_NEARA] = 0; cpi->mode_check_freq[THR_NEWA] = 0; sf->auto_filter = 1; sf->recode_loop = 0; // recode loop off sf->RD = 0; // Turn rd off sf->full_freq[0] = INT_MAX; sf->full_freq[1] = INT_MAX; } if (Speed > 4) { sf->auto_filter = 0; // Faster selection of loop filter cpi->mode_check_freq[THR_V_PRED] = 2; cpi->mode_check_freq[THR_H_PRED] = 2; cpi->mode_check_freq[THR_B_PRED] = 2; if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { cpi->mode_check_freq[THR_NEARG] = 2; cpi->mode_check_freq[THR_NEWG] = 4; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { cpi->mode_check_freq[THR_NEARA] = 2; cpi->mode_check_freq[THR_NEWA] = 4; } if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { sf->thresh_mult[THR_NEARESTG ] = 2000; sf->thresh_mult[THR_ZEROG ] = 2000; sf->thresh_mult[THR_NEARG ] = 2000; sf->thresh_mult[THR_NEWG ] = 4000; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { sf->thresh_mult[THR_NEARESTA ] = 2000; sf->thresh_mult[THR_ZEROA ] = 2000; sf->thresh_mult[THR_NEARA ] = 2000; sf->thresh_mult[THR_NEWA ] = 4000; } } break; #endif case 2: sf->optimize_coefficients = 0; sf->recode_loop = 0; sf->auto_filter = 1; sf->iterative_sub_pixel = 1; sf->thresh_mult[THR_NEARESTMV] = 0; sf->thresh_mult[THR_ZEROMV ] = 0; sf->thresh_mult[THR_DC ] = 0; sf->thresh_mult[THR_TM ] = 0; sf->thresh_mult[THR_NEARMV ] = 0; sf->thresh_mult[THR_V_PRED ] = 1000; sf->thresh_mult[THR_H_PRED ] = 1000; sf->thresh_mult[THR_B_PRED ] = 2500; sf->thresh_mult[THR_NEARESTG ] = 1000; sf->thresh_mult[THR_ZEROG ] = 1000; sf->thresh_mult[THR_NEARG ] = 1000; sf->thresh_mult[THR_NEARESTA ] = 1000; 
sf->thresh_mult[THR_ZEROA ] = 1000; sf->thresh_mult[THR_NEARA ] = 1000; sf->thresh_mult[THR_NEWMV ] = 2000; sf->thresh_mult[THR_NEWG ] = 2000; sf->thresh_mult[THR_NEWA ] = 2000; sf->thresh_mult[THR_SPLITMV ] = 5000; sf->thresh_mult[THR_SPLITG ] = 10000; sf->thresh_mult[THR_SPLITA ] = 10000; sf->full_freq[0] = 15; sf->full_freq[1] = 31; sf->search_method = NSTEP; if (!(cpi->ref_frame_flags & VP8_LAST_FLAG)) { sf->thresh_mult[THR_NEWMV ] = INT_MAX; sf->thresh_mult[THR_NEARESTMV] = INT_MAX; sf->thresh_mult[THR_ZEROMV ] = INT_MAX; sf->thresh_mult[THR_NEARMV ] = INT_MAX; sf->thresh_mult[THR_SPLITMV ] = INT_MAX; } if (!(cpi->ref_frame_flags & VP8_GOLD_FLAG)) { sf->thresh_mult[THR_NEARESTG ] = INT_MAX; sf->thresh_mult[THR_ZEROG ] = INT_MAX; sf->thresh_mult[THR_NEARG ] = INT_MAX; sf->thresh_mult[THR_NEWG ] = INT_MAX; sf->thresh_mult[THR_SPLITG ] = INT_MAX; } if (!(cpi->ref_frame_flags & VP8_ALT_FLAG)) { sf->thresh_mult[THR_NEARESTA ] = INT_MAX; sf->thresh_mult[THR_ZEROA ] = INT_MAX; sf->thresh_mult[THR_NEARA ] = INT_MAX; sf->thresh_mult[THR_NEWA ] = INT_MAX; sf->thresh_mult[THR_SPLITA ] = INT_MAX; } if (Speed > 0) { cpi->mode_check_freq[THR_SPLITG] = 4; cpi->mode_check_freq[THR_SPLITA] = 4; cpi->mode_check_freq[THR_SPLITMV] = 2; sf->thresh_mult[THR_DC ] = 0; sf->thresh_mult[THR_TM ] = 1000; sf->thresh_mult[THR_V_PRED ] = 2000; sf->thresh_mult[THR_H_PRED ] = 2000; sf->thresh_mult[THR_B_PRED ] = 5000; if (cpi->ref_frame_flags & VP8_LAST_FLAG) { sf->thresh_mult[THR_NEARESTMV] = 0; sf->thresh_mult[THR_ZEROMV ] = 0; sf->thresh_mult[THR_NEARMV ] = 0; sf->thresh_mult[THR_NEWMV ] = 2000; sf->thresh_mult[THR_SPLITMV ] = 10000; } if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { sf->thresh_mult[THR_NEARESTG ] = 1000; sf->thresh_mult[THR_ZEROG ] = 1000; sf->thresh_mult[THR_NEARG ] = 1000; sf->thresh_mult[THR_NEWG ] = 2000; sf->thresh_mult[THR_SPLITG ] = 20000; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { sf->thresh_mult[THR_NEARESTA ] = 1000; sf->thresh_mult[THR_ZEROA ] = 1000; 
sf->thresh_mult[THR_NEARA ] = 1000; sf->thresh_mult[THR_NEWA ] = 2000; sf->thresh_mult[THR_SPLITA ] = 20000; } sf->improved_quant = 0; sf->improved_dct = 0; } if (Speed > 1) { cpi->mode_check_freq[THR_SPLITMV] = 7; cpi->mode_check_freq[THR_SPLITG] = 15; cpi->mode_check_freq[THR_SPLITA] = 15; sf->thresh_mult[THR_TM ] = 2000; sf->thresh_mult[THR_V_PRED ] = 2000; sf->thresh_mult[THR_H_PRED ] = 2000; sf->thresh_mult[THR_B_PRED ] = 5000; if (cpi->ref_frame_flags & VP8_LAST_FLAG) { sf->thresh_mult[THR_NEWMV ] = 2000; sf->thresh_mult[THR_SPLITMV ] = 25000; } if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { sf->thresh_mult[THR_NEARESTG ] = 2000; sf->thresh_mult[THR_ZEROG ] = 2000; sf->thresh_mult[THR_NEARG ] = 2000; sf->thresh_mult[THR_NEWG ] = 2500; sf->thresh_mult[THR_SPLITG ] = 50000; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { sf->thresh_mult[THR_NEARESTA ] = 2000; sf->thresh_mult[THR_ZEROA ] = 2000; sf->thresh_mult[THR_NEARA ] = 2000; sf->thresh_mult[THR_NEWA ] = 2500; sf->thresh_mult[THR_SPLITA ] = 50000; } sf->full_freq[0] = 31; sf->full_freq[1] = 63; } if (Speed > 2) { sf->auto_filter = 0; // Faster selection of loop filter cpi->mode_check_freq[THR_V_PRED] = 2; cpi->mode_check_freq[THR_H_PRED] = 2; cpi->mode_check_freq[THR_B_PRED] = 2; if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { cpi->mode_check_freq[THR_NEARG] = 2; cpi->mode_check_freq[THR_NEWG] = 4; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { cpi->mode_check_freq[THR_NEARA] = 2; cpi->mode_check_freq[THR_NEWA] = 4; } sf->thresh_mult[THR_SPLITMV ] = INT_MAX; sf->thresh_mult[THR_SPLITG ] = INT_MAX; sf->thresh_mult[THR_SPLITA ] = INT_MAX; sf->full_freq[0] = 63; sf->full_freq[1] = 127; } if (Speed > 3) { sf->RD = 0; sf->full_freq[0] = INT_MAX; sf->full_freq[1] = INT_MAX; sf->auto_filter = 1; } if (Speed > 4) { sf->auto_filter = 0; // Faster selection of loop filter #if CONFIG_REALTIME_ONLY sf->search_method = HEX; #else sf->search_method = DIAMOND; #endif cpi->mode_check_freq[THR_V_PRED] = 4; 
cpi->mode_check_freq[THR_H_PRED] = 4; cpi->mode_check_freq[THR_B_PRED] = 4; if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { cpi->mode_check_freq[THR_NEARG] = 2; cpi->mode_check_freq[THR_NEWG] = 4; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { cpi->mode_check_freq[THR_NEARA] = 2; cpi->mode_check_freq[THR_NEWA] = 4; } sf->thresh_mult[THR_TM ] = 2000; sf->thresh_mult[THR_B_PRED ] = 5000; if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { sf->thresh_mult[THR_NEARESTG ] = 2000; sf->thresh_mult[THR_ZEROG ] = 2000; sf->thresh_mult[THR_NEARG ] = 2000; sf->thresh_mult[THR_NEWG ] = 4000; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { sf->thresh_mult[THR_NEARESTA ] = 2000; sf->thresh_mult[THR_ZEROA ] = 2000; sf->thresh_mult[THR_NEARA ] = 2000; sf->thresh_mult[THR_NEWA ] = 4000; } } if (Speed > 5) { // Disable split MB intra prediction mode sf->thresh_mult[THR_B_PRED] = INT_MAX; } if (Speed > 6) { unsigned int i, sum = 0; unsigned int total_mbs = cm->MBs; int thresh; int total_skip; int min = 2000; sf->iterative_sub_pixel = 0; if (cpi->oxcf.encode_breakout > 2000) min = cpi->oxcf.encode_breakout; min >>= 7; for (i = 0; i < min; i++) { sum += cpi->error_bins[i]; } total_skip = sum; sum = 0; // i starts from 2 to make sure thresh started from 2048 for (; i < 1024; i++) { sum += cpi->error_bins[i]; if (10 * sum >= (unsigned int)(cpi->Speed - 6)*(total_mbs - total_skip)) break; } i--; thresh = (i << 7); if (thresh < 2000) thresh = 2000; if (cpi->ref_frame_flags & VP8_LAST_FLAG) { sf->thresh_mult[THR_NEWMV] = thresh; sf->thresh_mult[THR_NEARESTMV ] = thresh >> 1; sf->thresh_mult[THR_NEARMV ] = thresh >> 1; } if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { sf->thresh_mult[THR_NEWG] = thresh << 1; sf->thresh_mult[THR_NEARESTG ] = thresh; sf->thresh_mult[THR_NEARG ] = thresh; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { sf->thresh_mult[THR_NEWA] = thresh << 1; sf->thresh_mult[THR_NEARESTA ] = thresh; sf->thresh_mult[THR_NEARA ] = thresh; } // Disable other intra prediction modes sf->thresh_mult[THR_TM] 
= INT_MAX; sf->thresh_mult[THR_V_PRED] = INT_MAX; sf->thresh_mult[THR_H_PRED] = INT_MAX; } if (Speed > 8) { sf->quarter_pixel_search = 0; } if (Speed > 9) { int Tmp = cpi->Speed - 8; if (Tmp > 4) Tmp = 4; if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { cpi->mode_check_freq[THR_ZEROG] = 1 << (Tmp - 1); cpi->mode_check_freq[THR_NEARESTG] = 1 << (Tmp - 1); cpi->mode_check_freq[THR_NEARG] = 1 << Tmp; cpi->mode_check_freq[THR_NEWG] = 1 << (Tmp + 1); } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { cpi->mode_check_freq[THR_ZEROA] = 1 << (Tmp - 1); cpi->mode_check_freq[THR_NEARESTA] = 1 << (Tmp - 1); cpi->mode_check_freq[THR_NEARA] = 1 << Tmp; cpi->mode_check_freq[THR_NEWA] = 1 << (Tmp + 1); } cpi->mode_check_freq[THR_NEWMV] = 1 << (Tmp - 1); } cm->filter_type = NORMAL_LOOPFILTER; if (Speed >= 14) cm->filter_type = SIMPLE_LOOPFILTER; if (Speed >= 15) { sf->half_pixel_search = 0; // This has a big hit on quality. Last resort } vpx_memset(cpi->error_bins, 0, sizeof(cpi->error_bins)); }; if (cpi->sf.search_method == NSTEP) { vp8_init3smotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride); } else if (cpi->sf.search_method == DIAMOND) { vp8_init_dsmotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride); } if (cpi->sf.improved_dct) { cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x4); cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short4x4); } else { cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast8x4); cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast4x4); } cpi->mb.short_walsh4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, walsh_short4x4); if (cpi->sf.improved_quant) { cpi->mb.quantize_b = QUANTIZE_INVOKE(&cpi->rtcd.quantize, quantb); } else { cpi->mb.quantize_b = QUANTIZE_INVOKE(&cpi->rtcd.quantize, fastquantb); } #if CONFIG_RUNTIME_CPU_DETECT cpi->mb.e_mbd.rtcd = &cpi->common.rtcd; #endif if (cpi->sf.iterative_sub_pixel == 1) { cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively; } else if 
(cpi->sf.quarter_pixel_search)
    {
        cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step;
    }
    else if (cpi->sf.half_pixel_search)
    {
        cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step;
    }
    else
    {
        // Neither quarter nor half pel search enabled: integer-pel only.
        cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
    }

    if (cpi->sf.optimize_coefficients == 1)
        cpi->mb.optimize = 1 + cpi->is_next_src_alt_ref;
    else
        cpi->mb.optimize = 0;

    // Full-pixel-only coding overrides any sub-pel search choice made above.
    if (cpi->common.full_pixel)
        cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;

#ifdef SPEEDSTATS
    frames_at_speed[cpi->Speed]++;
#endif
}

/* Allocate the lagged-source ring buffers (one raw YV12 buffer per lag
 * frame, clamped to [1, MAX_LAG_BUFFERS]) plus, when temporal filtering is
 * built in, the altref accumulation buffer. Raises VPX_CODEC_MEM_ERROR via
 * vpx_internal_error() on allocation failure.
 */
static void alloc_raw_frame_buffers(VP8_COMP *cpi)
{
    int i, buffers;

    buffers = cpi->oxcf.lag_in_frames;

    if (buffers > MAX_LAG_BUFFERS)
        buffers = MAX_LAG_BUFFERS;

    if (buffers < 1)
        buffers = 1;

    for (i = 0; i < buffers; i++)
        if (vp8_yv12_alloc_frame_buffer(&cpi->src_buffer[i].source_buffer,
                                        cpi->oxcf.Width, cpi->oxcf.Height,
                                        16))
            vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
                               "Failed to allocate lag buffer");

#if VP8_TEMPORAL_ALT_REF

    if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer.source_buffer,
                                    cpi->oxcf.Width, cpi->oxcf.Height, 16))
        vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate altref buffer");

#endif

    cpi->source_buffer_count = 0;
}

/* Allocate per-macroblock partition info; one extra row/col of padding so
 * that cpi->mb.pi can be biased past the border entries. Returns
 * ALLOC_FAILURE on out-of-memory, 0 on success.
 */
static int vp8_alloc_partition_data(VP8_COMP *cpi)
{
    cpi->mb.pip = vpx_calloc((cpi->common.mb_cols + 1) *
                             (cpi->common.mb_rows + 1),
                             sizeof(PARTITION_INFO));

    if(!cpi->mb.pip)
        return ALLOC_FAILURE;

    // Skip the padding border: point at the first real macroblock entry.
    cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1;

    return 0;
}

/* Allocate (or re-allocate) all encoder-side working buffers sized from the
 * current cm->Width/cm->Height: reference/scaled frame buffers, the token
 * pool, GF-activity maps and first-pass stats. Aborts via
 * vpx_internal_error() on any allocation failure.
 */
void vp8_alloc_compressor_data(VP8_COMP *cpi)
{
    VP8_COMMON *cm = & cpi->common;

    int width = cm->Width;
    int height = cm->Height;

    if (vp8_alloc_frame_buffers(cm, width, height))
        vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate frame buffers");

    if (vp8_alloc_partition_data(cpi))
        vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate partition data");

    // Round dimensions up to a whole number of 16x16 macroblocks.
    if ((width & 0xf) != 0)
        width += 16 - (width & 0xf);

    if ((height & 0xf) != 0)
        height += 16 - (height & 0xf);

    if (vp8_yv12_alloc_frame_buffer(&cpi->last_frame_uf,
                                    width, height, VP8BORDERINPIXELS))
        vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate last frame buffer");

    if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source, width, height, 16))
        vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate scaled source buffer");

    if (cpi->tok != 0)
        vpx_free(cpi->tok);

    {
        // 24 tokens per 4x4 block, 16+8 (24) blocks per MB worst case.
        unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16;

        CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));
    }

    // Data used for real time vc mode to see if gf needs refreshing
    cpi->inter_zz_count = 0;
    cpi->gf_bad_count = 0;
    cpi->gf_update_recommended = 0;

    // Structures used to monitor GF usage
    if (cpi->gf_active_flags != 0)
        vpx_free(cpi->gf_active_flags);

    CHECK_MEM_ERROR(cpi->gf_active_flags,
                    vpx_calloc(1, cm->mb_rows * cm->mb_cols));

    cpi->gf_active_count = cm->mb_rows * cm->mb_cols;

    cpi->total_stats = vpx_calloc(1, vp8_firstpass_stats_sz(cpi->common.MBs));
    cpi->this_frame_stats = vpx_calloc(1, vp8_firstpass_stats_sz(cpi->common.MBs));

    if(!cpi->total_stats || !cpi->this_frame_stats)
        vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate firstpass stats");
}

// Quant MOD
// Maps the external 0-63 quantizer scale onto the internal 0-127 q index.
static const int q_trans[] =
{
    0,   1,   2,   3,   4,   5,   7,   8,
    9,   10,  12,  13,  15,  17,  18,  19,
    20,  21,  23,  24,  25,  26,  27,  28,
    29,  30,  31,  33,  35,  37,  39,  41,
    43,  45,  47,  49,  51,  53,  55,  57,
    59,  61,  64,  67,  70,  73,  76,  79,
    82,  85,  88,  91,  94,  97,  100, 103,
    106, 109, 112, 115, 118, 121, 124, 127,
};

/* Inverse of q_trans: smallest external index whose internal q is >= x
 * (63 if x exceeds every table entry).
 */
int vp8_reverse_trans(int x)
{
    int i;

    for (i = 0; i < 64; i++)
        if (q_trans[i] >= x)
            return i;

    return 63;
};

/* Recompute all per-frame bandwidth figures after a frame-rate change.
 * Rates below 0.1 fps are treated as bogus and reset to 30.
 * (Continues into the rate-control limits on the following lines.)
 */
void vp8_new_frame_rate(VP8_COMP *cpi, double framerate)
{
    if(framerate < .1)
        framerate = 30;

    cpi->oxcf.frame_rate = framerate;
    cpi->output_frame_rate = cpi->oxcf.frame_rate;
    cpi->per_frame_bandwidth = (int)(cpi->oxcf.target_bandwidth /
                                     cpi->output_frame_rate);
    cpi->av_per_frame_bandwidth = (int)(cpi->oxcf.target_bandwidth /
                                        cpi->output_frame_rate);
cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth * cpi->oxcf.two_pass_vbrmin_section / 100); cpi->max_gf_interval = (int)(cpi->output_frame_rate / 2) + 2; //cpi->max_gf_interval = (int)(cpi->output_frame_rate * 2 / 3) + 1; //cpi->max_gf_interval = 24; if (cpi->max_gf_interval < 12) cpi->max_gf_interval = 12; // Special conditions when altr ref frame enabled in lagged compress mode if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames) { if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1) cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1; } } static int rescale(int val, int num, int denom) { int64_t llnum = num; int64_t llden = denom; int64_t llval = val; return llval * llnum / llden; } void vp8_init_config(VP8_PTR ptr, VP8_CONFIG *oxcf) { VP8_COMP *cpi = (VP8_COMP *)(ptr); VP8_COMMON *cm = &cpi->common; if (!cpi) return; cpi->auto_gold = 1; cpi->auto_adjust_gold_quantizer = 1; cpi->goldquantizer = 1; cpi->goldfreq = 7; cpi->auto_adjust_key_quantizer = 1; cpi->keyquantizer = 1; cm->version = oxcf->Version; vp8_setup_version(cm); if (oxcf == 0) { cpi->pass = 0; cpi->auto_worst_q = 0; cpi->oxcf.best_allowed_q = MINQ; cpi->oxcf.worst_allowed_q = MAXQ; cpi->oxcf.end_usage = USAGE_STREAM_FROM_SERVER; cpi->oxcf.starting_buffer_level = 4000; cpi->oxcf.optimal_buffer_level = 5000; cpi->oxcf.maximum_buffer_size = 6000; cpi->oxcf.under_shoot_pct = 90; cpi->oxcf.allow_df = 0; cpi->oxcf.drop_frames_water_mark = 20; cpi->oxcf.allow_spatial_resampling = 0; cpi->oxcf.resample_down_water_mark = 40; cpi->oxcf.resample_up_water_mark = 60; cpi->oxcf.fixed_q = cpi->interquantizer; cpi->filter_type = NORMAL_LOOPFILTER; if (cm->simpler_lpf) cpi->filter_type = SIMPLE_LOOPFILTER; cpi->compressor_speed = 1; cpi->horiz_scale = 0; cpi->vert_scale = 0; cpi->oxcf.two_pass_vbrbias = 50; cpi->oxcf.two_pass_vbrmax_section = 400; cpi->oxcf.two_pass_vbrmin_section = 0; cpi->oxcf.Sharpness = 0; cpi->oxcf.noise_sensitivity = 0; } else cpi->oxcf = *oxcf; switch (cpi->oxcf.Mode) { 
    // Map the requested encode Mode onto (pass, compressor_speed) and clamp
    // cpu_used to the range each mode supports.
    case MODE_REALTIME:
        cpi->pass = 0;
        cpi->compressor_speed = 2;

        if (cpi->oxcf.cpu_used < -16)
        {
            cpi->oxcf.cpu_used = -16;
        }

        if (cpi->oxcf.cpu_used > 16)
            cpi->oxcf.cpu_used = 16;

        break;

#if !(CONFIG_REALTIME_ONLY)
    case MODE_GOODQUALITY:
        cpi->pass = 0;
        cpi->compressor_speed = 1;

        if (cpi->oxcf.cpu_used < -5)
        {
            cpi->oxcf.cpu_used = -5;
        }

        if (cpi->oxcf.cpu_used > 5)
            cpi->oxcf.cpu_used = 5;

        break;

    case MODE_BESTQUALITY:
        cpi->pass = 0;
        cpi->compressor_speed = 0;
        break;

    case MODE_FIRSTPASS:
        cpi->pass = 1;
        cpi->compressor_speed = 1;
        break;

    case MODE_SECONDPASS:
        cpi->pass = 2;
        cpi->compressor_speed = 1;

        if (cpi->oxcf.cpu_used < -5)
        {
            cpi->oxcf.cpu_used = -5;
        }

        if (cpi->oxcf.cpu_used > 5)
            cpi->oxcf.cpu_used = 5;

        break;

    case MODE_SECONDPASS_BEST:
        cpi->pass = 2;
        cpi->compressor_speed = 0;
        break;
#endif
    }

    if (cpi->pass == 0)
        cpi->auto_worst_q = 1;

    // Translate external 0-63 quantizer settings to internal 0-127 indices.
    cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
    cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];

    // Fixed-q mode: worst_allowed_q doubles as the fixed q value; alt/key/
    // gold each get their own fixed value (negative means "use q_trans[0]").
    if (oxcf->fixed_q >= 0)
    {
        if (oxcf->worst_allowed_q < 0)
            cpi->oxcf.fixed_q = q_trans[0];
        else
            cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q];

        if (oxcf->alt_q < 0)
            cpi->oxcf.alt_q = q_trans[0];
        else
            cpi->oxcf.alt_q = q_trans[oxcf->alt_q];

        if (oxcf->key_q < 0)
            cpi->oxcf.key_q = q_trans[0];
        else
            cpi->oxcf.key_q = q_trans[oxcf->key_q];

        if (oxcf->gold_q < 0)
            cpi->oxcf.gold_q = q_trans[0];
        else
            cpi->oxcf.gold_q = q_trans[oxcf->gold_q];
    }

    cpi->baseline_gf_interval = cpi->oxcf.alt_freq ?
                                cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL;

    cpi->ref_frame_flags = VP8_ALT_FLAG | VP8_GOLD_FLAG | VP8_LAST_FLAG;

    //cpi->use_golden_frame_only = 0;
    //cpi->use_last_frame_only = 0;
    cm->refresh_golden_frame = 0;
    cm->refresh_last_frame = 1;
    cm->refresh_entropy_probs = 1;

    if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3)
        cm->multi_token_partition = (TOKEN_PARTITION) cpi->oxcf.token_partitions;

    setup_features(cpi);

    {
        int i;

        for (i = 0; i < MAX_MB_SEGMENTS; i++)
            cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
    }

    // At the moment the first order values may not be > MAXQ
    if (cpi->oxcf.fixed_q > MAXQ)
        cpi->oxcf.fixed_q = MAXQ;

    // local file playback mode == really big buffer
    if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK)
    {
        cpi->oxcf.starting_buffer_level = 60000;
        cpi->oxcf.optimal_buffer_level = 60000;
        cpi->oxcf.maximum_buffer_size = 240000;
    }

    // Convert target bandwidth from Kbit/s to Bit/s
    cpi->oxcf.target_bandwidth *= 1000;

    // Buffer levels arrive in milliseconds; rescale() converts them to bits
    // at the target bandwidth. Zero means "derive from the bandwidth".
    cpi->oxcf.starting_buffer_level =
        rescale(cpi->oxcf.starting_buffer_level,
                cpi->oxcf.target_bandwidth, 1000);

    if (cpi->oxcf.optimal_buffer_level == 0)
        cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
    else
        cpi->oxcf.optimal_buffer_level =
            rescale(cpi->oxcf.optimal_buffer_level,
                    cpi->oxcf.target_bandwidth, 1000);

    if (cpi->oxcf.maximum_buffer_size == 0)
        cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
    else
        cpi->oxcf.maximum_buffer_size =
            rescale(cpi->oxcf.maximum_buffer_size,
                    cpi->oxcf.target_bandwidth, 1000);

    cpi->buffer_level = cpi->oxcf.starting_buffer_level;
    cpi->bits_off_target = cpi->oxcf.starting_buffer_level;

    vp8_new_frame_rate(cpi, cpi->oxcf.frame_rate);
    cpi->worst_quality = cpi->oxcf.worst_allowed_q;
    cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
    cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
    cpi->best_quality = cpi->oxcf.best_allowed_q;
    cpi->active_best_quality = cpi->oxcf.best_allowed_q;

    cpi->buffered_mode = (cpi->oxcf.optimal_buffer_level > 0) ? TRUE : FALSE;

    cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
    cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
    cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
    cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;

    cpi->total_actual_bits = 0;
    cpi->total_target_vs_actual = 0;

    // Only allow dropped frames in buffered mode
    cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode;

    cm->filter_type = (LOOPFILTERTYPE) cpi->filter_type;

    if (!cm->use_bilinear_mc_filter)
        cm->mcomp_filter_type = SIXTAP;
    else
        cm->mcomp_filter_type = BILINEAR;

    cpi->target_bandwidth = cpi->oxcf.target_bandwidth;

    cm->Width = cpi->oxcf.Width ;
    cm->Height = cpi->oxcf.Height ;

    cpi->intra_frame_target = (4 * (cm->Width + cm->Height) / 15) * 1000; // As per VP8

    cm->horiz_scale = cpi->horiz_scale;
    cm->vert_scale = cpi->vert_scale ;

    // VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs)
    if (cpi->oxcf.Sharpness > 7)
        cpi->oxcf.Sharpness = 7;

    cm->sharpness_level = cpi->oxcf.Sharpness;

    if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL)
    {
        int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
        int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);

        Scale2Ratio(cm->horiz_scale, &hr, &hs);
        Scale2Ratio(cm->vert_scale, &vr, &vs);

        // always go to the next whole number
        cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
        cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
    }

    // (Re)allocate frame buffers when the 16-aligned dimensions no longer
    // match what was allocated (or nothing has been allocated yet).
    if (((cm->Width + 15) & 0xfffffff0) != cm->yv12_fb[cm->lst_fb_idx].y_width ||
        ((cm->Height + 15) & 0xfffffff0) != cm->yv12_fb[cm->lst_fb_idx].y_height ||
        cm->yv12_fb[cm->lst_fb_idx].y_width == 0)
    {
        alloc_raw_frame_buffers(cpi);
        vp8_alloc_compressor_data(cpi);
    }

    // Clamp KF frame size to quarter of data rate
    if (cpi->intra_frame_target > cpi->target_bandwidth >> 2)
        cpi->intra_frame_target = cpi->target_bandwidth >> 2;

    if (cpi->oxcf.fixed_q >= 0)
    {
        cpi->last_q[0] = cpi->oxcf.fixed_q;
        cpi->last_q[1] = cpi->oxcf.fixed_q;
    }

    cpi->Speed = cpi->oxcf.cpu_used;

    // force allow_lag to 0 if lag_in_frames is 0
    if (cpi->oxcf.lag_in_frames == 0)
    {
        cpi->oxcf.allow_lag = 0;
    }
    // Limit on lag buffers as these are not currently dynamically allocated
    else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS)
        cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;

    // YX Temp
    cpi->last_alt_ref_sei = -1;
    cpi->is_src_frame_alt_ref = 0;
    cpi->is_next_src_alt_ref = 0;

#if 0
    // Experimental RD Code
    cpi->frame_distortion = 0;
    cpi->last_frame_distortion = 0;
#endif

#if VP8_TEMPORAL_ALT_REF

    cpi->use_weighted_temporal_filter = 0;

    {
        int i;

        cpi->fixed_divide[0] = 0;

        // Reciprocal table (0x80000/i) used by the temporal filter to avoid
        // per-pixel divides.
        for (i = 1; i < 512; i++)
            cpi->fixed_divide[i] = 0x80000 / i;
    }
#endif
}

/*
 * This function needs more clean up, i.e. be more tuned towards
 * change_config rather than init_config !!!!!!!!!!!!!!!!
 * YX - 5/28/2009
 *
 */
/* Re-apply a (possibly changed) encoder configuration on a live instance.
 * Largely duplicates the body of vp8_init_config(); both must be kept in
 * sync (see the note above).
 */
void vp8_change_config(VP8_PTR ptr, VP8_CONFIG *oxcf)
{
    VP8_COMP *cpi = (VP8_COMP *)(ptr);
    VP8_COMMON *cm = &cpi->common;

    if (!cpi)
        return;

    if (!oxcf)
        return;

    if (cm->version != oxcf->Version)
    {
        cm->version = oxcf->Version;
        vp8_setup_version(cm);
    }

    cpi->oxcf = *oxcf;

    // Same Mode -> (pass, speed) mapping as vp8_init_config().
    switch (cpi->oxcf.Mode)
    {
    case MODE_REALTIME:
        cpi->pass = 0;
        cpi->compressor_speed = 2;

        if (cpi->oxcf.cpu_used < -16)
        {
            cpi->oxcf.cpu_used = -16;
        }

        if (cpi->oxcf.cpu_used > 16)
            cpi->oxcf.cpu_used = 16;

        break;

#if !(CONFIG_REALTIME_ONLY)
    case MODE_GOODQUALITY:
        cpi->pass = 0;
        cpi->compressor_speed = 1;

        if (cpi->oxcf.cpu_used < -5)
        {
            cpi->oxcf.cpu_used = -5;
        }

        if (cpi->oxcf.cpu_used > 5)
            cpi->oxcf.cpu_used = 5;

        break;

    case MODE_BESTQUALITY:
        cpi->pass = 0;
        cpi->compressor_speed = 0;
        break;

    case MODE_FIRSTPASS:
        cpi->pass = 1;
        cpi->compressor_speed = 1;
        break;

    case MODE_SECONDPASS:
        cpi->pass = 2;
        cpi->compressor_speed = 1;

        if (cpi->oxcf.cpu_used < -5)
        {
            cpi->oxcf.cpu_used = -5;
        }

        if (cpi->oxcf.cpu_used > 5)
            cpi->oxcf.cpu_used = 5;

        break;

    case MODE_SECONDPASS_BEST:
        cpi->pass = 2;
        cpi->compressor_speed = 0;
        break;
#endif
    }

    if (cpi->pass == 0)
        cpi->auto_worst_q = 1;

    cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
    cpi->oxcf.best_allowed_q =
q_trans[oxcf->best_allowed_q];

    // Fixed-q handling, identical to vp8_init_config().
    if (oxcf->fixed_q >= 0)
    {
        if (oxcf->worst_allowed_q < 0)
            cpi->oxcf.fixed_q = q_trans[0];
        else
            cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q];

        if (oxcf->alt_q < 0)
            cpi->oxcf.alt_q = q_trans[0];
        else
            cpi->oxcf.alt_q = q_trans[oxcf->alt_q];

        if (oxcf->key_q < 0)
            cpi->oxcf.key_q = q_trans[0];
        else
            cpi->oxcf.key_q = q_trans[oxcf->key_q];

        if (oxcf->gold_q < 0)
            cpi->oxcf.gold_q = q_trans[0];
        else
            cpi->oxcf.gold_q = q_trans[oxcf->gold_q];
    }

    cpi->baseline_gf_interval = cpi->oxcf.alt_freq ?
                                cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL;

    cpi->ref_frame_flags = VP8_ALT_FLAG | VP8_GOLD_FLAG | VP8_LAST_FLAG;

    //cpi->use_golden_frame_only = 0;
    //cpi->use_last_frame_only = 0;
    cm->refresh_golden_frame = 0;
    cm->refresh_last_frame = 1;
    cm->refresh_entropy_probs = 1;

    if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3)
        cm->multi_token_partition = (TOKEN_PARTITION) cpi->oxcf.token_partitions;

    setup_features(cpi);

    {
        int i;

        for (i = 0; i < MAX_MB_SEGMENTS; i++)
            cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
    }

    // At the moment the first order values may not be > MAXQ
    if (cpi->oxcf.fixed_q > MAXQ)
        cpi->oxcf.fixed_q = MAXQ;

    // local file playback mode == really big buffer
    if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK)
    {
        cpi->oxcf.starting_buffer_level = 60000;
        cpi->oxcf.optimal_buffer_level = 60000;
        cpi->oxcf.maximum_buffer_size = 240000;
    }

    // Convert target bandwidth from Kbit/s to Bit/s
    cpi->oxcf.target_bandwidth *= 1000;

    cpi->oxcf.starting_buffer_level =
        rescale(cpi->oxcf.starting_buffer_level,
                cpi->oxcf.target_bandwidth, 1000);

    if (cpi->oxcf.optimal_buffer_level == 0)
        cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
    else
        cpi->oxcf.optimal_buffer_level =
            rescale(cpi->oxcf.optimal_buffer_level,
                    cpi->oxcf.target_bandwidth, 1000);

    if (cpi->oxcf.maximum_buffer_size == 0)
        cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
    else
        cpi->oxcf.maximum_buffer_size =
            rescale(cpi->oxcf.maximum_buffer_size,
                    cpi->oxcf.target_bandwidth, 1000);

    cpi->buffer_level = cpi->oxcf.starting_buffer_level;
    cpi->bits_off_target = cpi->oxcf.starting_buffer_level;

    vp8_new_frame_rate(cpi, cpi->oxcf.frame_rate);
    cpi->worst_quality = cpi->oxcf.worst_allowed_q;
    cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
    cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
    cpi->best_quality = cpi->oxcf.best_allowed_q;
    cpi->active_best_quality = cpi->oxcf.best_allowed_q;

    cpi->buffered_mode = (cpi->oxcf.optimal_buffer_level > 0) ? TRUE : FALSE;

    cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
    cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
    cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
    cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;

    cpi->total_actual_bits = 0;
    cpi->total_target_vs_actual = 0;

    // Only allow dropped frames in buffered mode
    cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode;

    cm->filter_type = (LOOPFILTERTYPE) cpi->filter_type;

    if (!cm->use_bilinear_mc_filter)
        cm->mcomp_filter_type = SIXTAP;
    else
        cm->mcomp_filter_type = BILINEAR;

    cpi->target_bandwidth = cpi->oxcf.target_bandwidth;

    cm->Width = cpi->oxcf.Width ;
    cm->Height = cpi->oxcf.Height ;

    cm->horiz_scale = cpi->horiz_scale;
    cm->vert_scale = cpi->vert_scale ;

    cpi->intra_frame_target = (4 * (cm->Width + cm->Height) / 15) * 1000; // As per VP8

    // VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs)
    if (cpi->oxcf.Sharpness > 7)
        cpi->oxcf.Sharpness = 7;

    cm->sharpness_level = cpi->oxcf.Sharpness;

    if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL)
    {
        int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
        int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);

        Scale2Ratio(cm->horiz_scale, &hr, &hs);
        Scale2Ratio(cm->vert_scale, &vr, &vs);

        // always go to the next whole number
        cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
        cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
    }

    // Re-allocate frame buffers when the 16-aligned size changed.
    if (((cm->Width + 15) & 0xfffffff0) != cm->yv12_fb[cm->lst_fb_idx].y_width ||
        ((cm->Height + 15) & 0xfffffff0) != cm->yv12_fb[cm->lst_fb_idx].y_height ||
        cm->yv12_fb[cm->lst_fb_idx].y_width == 0)
    {
        alloc_raw_frame_buffers(cpi);
        vp8_alloc_compressor_data(cpi);
    }

    // Clamp KF frame size to quarter of data rate
    if (cpi->intra_frame_target > cpi->target_bandwidth >> 2)
        cpi->intra_frame_target = cpi->target_bandwidth >> 2;

    if (cpi->oxcf.fixed_q >= 0)
    {
        cpi->last_q[0] = cpi->oxcf.fixed_q;
        cpi->last_q[1] = cpi->oxcf.fixed_q;
    }

    cpi->Speed = cpi->oxcf.cpu_used;

    // force allow_lag to 0 if lag_in_frames is 0
    if (cpi->oxcf.lag_in_frames == 0)
    {
        cpi->oxcf.allow_lag = 0;
    }
    // Limit on lag buffers as these are not currently dynamically allocated
    else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS)
        cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;

    // YX Temp
    cpi->last_alt_ref_sei = -1;
    cpi->is_src_frame_alt_ref = 0;
    cpi->is_next_src_alt_ref = 0;

#if 0
    // Experimental RD Code
    cpi->frame_distortion = 0;
    cpi->last_frame_distortion = 0;
#endif

}

// NOTE(review): M_LOG2_E is misnamed -- 0.6931... is ln(2), not log2(e).
// Dividing by it still yields log base 2, so the macro below is numerically
// correct, but it also shadows the standard C99 log2f(); confirm no TU in
// this file relies on the library version.
#define M_LOG2_E 0.693147180559945309417
#define log2f(x) (log (x) / (float) M_LOG2_E)

/* Fill the motion-vector SAD cost tables: cost(i) = 256*(2*(log2(2i)+0.6))
 * for both row and column components, symmetric in +/-i. The mvsadcost
 * pointers are pre-biased to the table centre (see vp8_create_compressor,
 * which sets them to &mvsadcosts[..][mv_max+1]), so the negative indices
 * used here are in range.
 */
static void cal_mvsadcosts(int *mvsadcost[2])
{
    int i = 1;

    mvsadcost [0] [0] = 300;
    mvsadcost [1] [0] = 300;

    do
    {
        double z = 256 * (2 * (log2f(2 * i) + .6));
        mvsadcost [0][i] = (int) z;
        mvsadcost [1][i] = (int) z;
        mvsadcost [0][-i] = (int) z;
        mvsadcost [1][-i] = (int) z;
    }
    while (++i <= mv_max);
}

/* Allocate and initialise a complete compressor instance from oxcf.
 * Returns the opaque handle, or 0 on allocation failure. Internal
 * allocation failures longjmp back to the setjmp below, which tears the
 * partially built instance down and returns 0. ctx is a volatile union so
 * the handle survives the longjmp.
 */
VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf)
{
    int i;
    volatile union
    {
        VP8_COMP *cpi;
        VP8_PTR   ptr;
    } ctx;

    VP8_COMP *cpi;
    VP8_COMMON *cm;

    cpi = ctx.cpi = vpx_memalign(32, sizeof(VP8_COMP));
    // Check that the CPI instance is valid
    if (!cpi)
        return 0;

    cm = &cpi->common;

    vpx_memset(cpi, 0, sizeof(VP8_COMP));

    if (setjmp(cm->error.jmp))
    {
        // Error-recovery path: any vpx_internal_error() below lands here.
        VP8_PTR ptr = ctx.ptr;

        ctx.cpi->common.error.setjmp = 0;
        vp8_remove_compressor(&ptr);
        return 0;
    }

    cpi->common.error.setjmp = 1;

    CHECK_MEM_ERROR(cpi->rdtok, vpx_calloc(256 * 3 / 2, sizeof(TOKENEXTRA)));
    CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site), (MAX_MVSEARCH_STEPS * 8) + 1));
    vp8_create_common(&cpi->common);
    vp8_cmachine_specific_config(cpi);
    vp8_init_config((VP8_PTR)cpi, oxcf);

    memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob, sizeof(vp8cx_base_skip_false_prob));

    cpi->common.current_video_frame   = 0;
    cpi->kf_overspend_bits            = 0;
    cpi->kf_bitrate_adjustment        = 0;
    cpi->frames_till_gf_update_due      = 0;
    cpi->gf_overspend_bits            = 0;
    cpi->non_gf_bitrate_adjustment     = 0;

    cpi->prob_last_coded              = 128;
    cpi->prob_gf_coded                = 128;
    cpi->prob_intra_coded             = 63;

    // Prime the recent reference frame usage counters.
    // Hereafter they will be maintained as a sort of moving average
    cpi->recent_ref_frame_usage[INTRA_FRAME]  = 1;
    cpi->recent_ref_frame_usage[LAST_FRAME]   = 1;
    cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
    cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;

    // Set reference frame sign bias for ALTREF frame to 1 (for now)
    cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;

    cpi->gf_decay_rate = 0;
    cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;

    cpi->gold_is_last = 0 ;
    cpi->alt_is_last  = 0 ;
    cpi->gold_is_alt  = 0 ;

    // Create the encoder segmentation map and set all entries to 0
    CHECK_MEM_ERROR(cpi->segmentation_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));

    // Active map: 1 = encode this MB; default everything active.
    CHECK_MEM_ERROR(cpi->active_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
    vpx_memset(cpi->active_map , 1, (cpi->common.mb_rows * cpi->common.mb_cols));
    cpi->active_map_enabled = 0;

    // Create the first pass motion map structure and set to 0
    // Allocate space for maximum of 15 buffers
    CHECK_MEM_ERROR(cpi->fp_motion_map, vpx_calloc(15*cpi->common.MBs, 1));

#if 0
    // Experimental code for lagged and one pass
    // Initialise one_pass GF frames stats
    // Update stats used for GF selection
    if (cpi->pass == 0)
    {
        cpi->one_pass_frame_index = 0;

        for (i = 0; i < MAX_LAG_BUFFERS; i++)
        {
            cpi->one_pass_frame_stats[i].frames_so_far = 0;
            cpi->one_pass_frame_stats[i].frame_intra_error = 0.0;
            cpi->one_pass_frame_stats[i].frame_coded_error = 0.0;
            cpi->one_pass_frame_stats[i].frame_pcnt_inter = 0.0;
            cpi->one_pass_frame_stats[i].frame_pcnt_motion = 0.0;
            cpi->one_pass_frame_stats[i].frame_mvr = 0.0;
            cpi->one_pass_frame_stats[i].frame_mvr_abs = 0.0;
            cpi->one_pass_frame_stats[i].frame_mvc = 0.0;
            cpi->one_pass_frame_stats[i].frame_mvc_abs = 0.0;
        }
    }
#endif

    // Should we use the cyclic refresh method.
    // Currently this is tied to error resilient mode
    cpi->cyclic_refresh_mode_enabled = cpi->oxcf.error_resilient_mode;
    // Refresh at most 1/40th of the frame's MBs per frame.
    cpi->cyclic_refresh_mode_max_mbs_perframe = (cpi->common.mb_rows * cpi->common.mb_cols) / 40;
    cpi->cyclic_refresh_mode_index = 0;
    cpi->cyclic_refresh_q = 32;

    if (cpi->cyclic_refresh_mode_enabled)
    {
        CHECK_MEM_ERROR(cpi->cyclic_refresh_map, vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
    }
    else
        cpi->cyclic_refresh_map = (signed char *) NULL;

    // Test function for segmentation
    //segmentation_test_function((VP8_PTR) cpi);

#ifdef ENTROPY_STATS
    init_context_counters();
#endif

    cpi->frames_since_key = 8;        // Give a sensible default for the first frame.
    cpi->key_frame_frequency = cpi->oxcf.key_freq;

    cpi->source_alt_ref_pending = FALSE;
    cpi->source_alt_ref_active = FALSE;
    cpi->common.refresh_alt_ref_frame = 0;

    cpi->b_calculate_psnr = CONFIG_PSNR;
#if CONFIG_PSNR
    cpi->b_calculate_ssimg = 0;

    cpi->count = 0;
    cpi->bytes = 0;

    if (cpi->b_calculate_psnr)
    {
        cpi->total_sq_error = 0.0;
        cpi->total_sq_error2 = 0.0;
        cpi->total_y = 0.0;
        cpi->total_u = 0.0;
        cpi->total_v = 0.0;
        cpi->total = 0.0;
        cpi->totalp_y = 0.0;
        cpi->totalp_u = 0.0;
        cpi->totalp_v = 0.0;
        cpi->totalp = 0.0;
        cpi->tot_recode_hits = 0;
        cpi->summed_quality = 0;
        cpi->summed_weights = 0;
    }

    if (cpi->b_calculate_ssimg)
    {
        cpi->total_ssimg_y = 0;
        cpi->total_ssimg_u = 0;
        cpi->total_ssimg_v = 0;
        cpi->total_ssimg_all = 0;
    }

#ifndef LLONG_MAX
#define LLONG_MAX  9223372036854775807LL
#endif
    cpi->first_time_stamp_ever = LLONG_MAX;
#endif

    cpi->frames_till_gf_update_due      = 0;
    cpi->key_frame_count              = 1;
    cpi->tot_key_frame_bits            = 0;

    cpi->ni_av_qi                     = cpi->oxcf.worst_allowed_q;
    cpi->ni_tot_qi                    = 0;
    cpi->ni_frames                   = 0;
    cpi->total_byte_count             = 0;

    cpi->drop_frame                  = 0;
    cpi->drop_count                  = 0;
    cpi->max_drop_count               = 0;
    cpi->max_consec_dropped_frames     = 4;

    cpi->rate_correction_factor         = 1.0;
    cpi->key_frame_rate_correction_factor = 1.0;
    cpi->gf_rate_correction_factor  = 1.0;
    cpi->est_max_qcorrection_factor  = 1.0;

    // Bias the mv cost pointers to the table centre so they can be indexed
    // with signed motion vector components (see cal_mvsadcosts above).
    cpi->mb.mvcost[0] = &cpi->mb.mvcosts[0][mv_max+1];
    cpi->mb.mvcost[1] = &cpi->mb.mvcosts[1][mv_max+1];
    cpi->mb.mvsadcost[0] = &cpi->mb.mvsadcosts[0][mv_max+1];
    cpi->mb.mvsadcost[1] = &cpi->mb.mvsadcosts[1][mv_max+1];

    cal_mvsadcosts(cpi->mb.mvsadcost);

    for (i = 0; i < KEY_FRAME_CONTEXT; i++)
    {
        cpi->prior_key_frame_size[i]     = cpi->intra_frame_target;
        cpi->prior_key_frame_distance[i] = (int)cpi->output_frame_rate;
    }

    cpi->check_freq[0] = 15;
    cpi->check_freq[1] = 15;

#ifdef OUTPUT_YUV_SRC
    yuv_file = fopen("bd.yuv", "ab");
#endif

#if 0
    framepsnr = fopen("framepsnr.stt", "a");
    kf_list = fopen("kf_list.stt", "w");
#endif

    cpi->output_pkt_list = oxcf->output_pkt_list;

#if !(CONFIG_REALTIME_ONLY)

    if
(cpi->pass == 1)
    {
        vp8_init_first_pass(cpi);
    }
    else if (cpi->pass == 2)
    {
        // Second pass: wire up the first-pass stats packets supplied by the
        // caller; stats_in_end points at the last whole packet.
        size_t packet_sz = vp8_firstpass_stats_sz(cpi->common.MBs);
        int packets = oxcf->two_pass_stats_in.sz / packet_sz;

        cpi->stats_in = oxcf->two_pass_stats_in.buf;
        cpi->stats_in_end = (void*)((char *)cpi->stats_in
                            + (packets - 1) * packet_sz);
        vp8_init_second_pass(cpi);
    }

#endif

    if (cpi->compressor_speed == 2)
    {
        cpi->cpu_freq            = 0; //vp8_get_processor_freq();
        cpi->avg_encode_time      = 0;
        cpi->avg_pick_mode_time    = 0;
    }

    vp8_set_speed_features(cpi);

    // Set starting values of RD threshold multipliers (128 = *1)
    for (i = 0; i < MAX_MODES; i++)
    {
        cpi->rd_thresh_mult[i] = 128;
    }

#ifdef ENTROPY_STATS
    init_mv_ref_counts();
#endif

    vp8cx_create_encoder_threads(cpi);

    // Per-block-size SAD/variance function tables, resolved through the
    // runtime CPU dispatch (VARIANCE_INVOKE). Half-pel shortcuts exist only
    // for 16x16.
    cpi->fn_ptr[BLOCK_16X16].sdf            = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16);
    cpi->fn_ptr[BLOCK_16X16].vf             = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16);
    cpi->fn_ptr[BLOCK_16X16].svf            = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar16x16);
    cpi->fn_ptr[BLOCK_16X16].svf_halfpix_h  = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_h);
    cpi->fn_ptr[BLOCK_16X16].svf_halfpix_v  = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_v);
    cpi->fn_ptr[BLOCK_16X16].svf_halfpix_hv = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_hv);
    cpi->fn_ptr[BLOCK_16X16].sdx3f          = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x3);
    cpi->fn_ptr[BLOCK_16X16].sdx4df         = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x4d);

    cpi->fn_ptr[BLOCK_16X8].sdf            = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8);
    cpi->fn_ptr[BLOCK_16X8].vf             = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x8);
    cpi->fn_ptr[BLOCK_16X8].svf            = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar16x8);
    cpi->fn_ptr[BLOCK_16X8].svf_halfpix_h  = NULL;
    cpi->fn_ptr[BLOCK_16X8].svf_halfpix_v  = NULL;
    cpi->fn_ptr[BLOCK_16X8].svf_halfpix_hv = NULL;
    cpi->fn_ptr[BLOCK_16X8].sdx3f          = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x3);
    cpi->fn_ptr[BLOCK_16X8].sdx4df         = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x4d);

    cpi->fn_ptr[BLOCK_8X16].sdf            = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16);
    cpi->fn_ptr[BLOCK_8X16].vf             = VARIANCE_INVOKE(&cpi->rtcd.variance, var8x16);
    cpi->fn_ptr[BLOCK_8X16].svf            = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar8x16);
    cpi->fn_ptr[BLOCK_8X16].svf_halfpix_h  = NULL;
    cpi->fn_ptr[BLOCK_8X16].svf_halfpix_v  = NULL;
    cpi->fn_ptr[BLOCK_8X16].svf_halfpix_hv = NULL;
    cpi->fn_ptr[BLOCK_8X16].sdx3f          = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x3);
    cpi->fn_ptr[BLOCK_8X16].sdx4df         = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x4d);

    cpi->fn_ptr[BLOCK_8X8].sdf            = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8);
    cpi->fn_ptr[BLOCK_8X8].vf             = VARIANCE_INVOKE(&cpi->rtcd.variance, var8x8);
    cpi->fn_ptr[BLOCK_8X8].svf            = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar8x8);
    cpi->fn_ptr[BLOCK_8X8].svf_halfpix_h  = NULL;
    cpi->fn_ptr[BLOCK_8X8].svf_halfpix_v  = NULL;
    cpi->fn_ptr[BLOCK_8X8].svf_halfpix_hv = NULL;
    cpi->fn_ptr[BLOCK_8X8].sdx3f          = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x3);
    cpi->fn_ptr[BLOCK_8X8].sdx4df         = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x4d);

    cpi->fn_ptr[BLOCK_4X4].sdf            = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4);
    cpi->fn_ptr[BLOCK_4X4].vf             = VARIANCE_INVOKE(&cpi->rtcd.variance, var4x4);
    cpi->fn_ptr[BLOCK_4X4].svf            = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar4x4);
    cpi->fn_ptr[BLOCK_4X4].svf_halfpix_h  = NULL;
    cpi->fn_ptr[BLOCK_4X4].svf_halfpix_v  = NULL;
    cpi->fn_ptr[BLOCK_4X4].svf_halfpix_hv = NULL;
    cpi->fn_ptr[BLOCK_4X4].sdx3f          = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x3);
    cpi->fn_ptr[BLOCK_4X4].sdx4df         = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x4d);

#if !(CONFIG_REALTIME_ONLY)
    cpi->full_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, full_search);
#endif
    cpi->diamond_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, diamond_search);

    cpi->ready_for_new_frame = 1;

    cpi->source_encode_index = 0;

    // make sure frame 1 is okay
    cpi->error_bins[0] = cpi->common.MBs;

    //vp8cx_init_quantizer() is first called here. Add check in vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only called later
    //when needed. This will avoid unnecessary calls of vp8cx_init_quantizer() for every frame.
    vp8cx_init_quantizer(cpi);
    {
        vp8_init_loop_filter(cm);
        cm->last_frame_type = KEY_FRAME;
        cm->last_filter_type = cm->filter_type;
        cm->last_sharpness_level = cm->sharpness_level;
    }

    // Construction finished without a longjmp; disarm the error handler.
    cpi->common.error.setjmp = 0;
    return (VP8_PTR) cpi;
}

/* Tear down a compressor instance created by vp8_create_compressor.
 * (Definition continues beyond this chunk; the PSNR/SSIM summary below is
 * only emitted when at least one frame was encoded and CONFIG_PSNR is set.)
 */
void vp8_remove_compressor(VP8_PTR *ptr)
{
    VP8_COMP *cpi = (VP8_COMP *)(*ptr);

    if (!cpi)
        return;

    if (cpi && (cpi->common.current_video_frame > 0))
    {
#if !(CONFIG_REALTIME_ONLY)

        if (cpi->pass == 2)
        {
            vp8_end_second_pass(cpi);
        }

#endif

#ifdef ENTROPY_STATS
        print_context_counters();
        print_tree_update_probs();
        print_mode_context();
#endif

#if CONFIG_PSNR

        if (cpi->pass != 1)
        {
            FILE *f = fopen("opsnr.stt", "a");
            double time_encoded = (cpi->source_end_time_stamp - cpi->first_time_stamp_ever) / 10000000.000;
            double total_encode_time = (cpi->time_receive_data + cpi->time_compress_data)   / 1000.000;
            double dr = (double)cpi->bytes * (double) 8 / (double)1000  / time_encoded;

            if (cpi->b_calculate_psnr)
            {
                YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx];
                double samples = 3.0 / 2 * cpi->count * lst_yv12->y_width * lst_yv12->y_height;
                double total_psnr = vp8_mse2psnr(samples, 255.0, cpi->total_sq_error);
                double total_psnr2 = vp8_mse2psnr(samples, 255.0, cpi->total_sq_error2);
                double total_ssim = 100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);

                fprintf(f, "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\tGLPsnrP\tVPXSSIM\t Time(us)\n");
                fprintf(f, "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f %8.0f\n",
                        dr, cpi->total / cpi->count, total_psnr, cpi->totalp / cpi->count, total_psnr2, total_ssim,
                        total_encode_time);
            }

            if (cpi->b_calculate_ssimg)
            {
                fprintf(f, "BitRate\tSSIM_Y\tSSIM_U\tSSIM_V\tSSIM_A\t Time(us)\n");
                fprintf(f, "%7.3f\t%6.4f\t%6.4f\t%6.4f\t%6.4f\t%8.0f\n", dr,
                        cpi->total_ssimg_y / cpi->count, cpi->total_ssimg_u / cpi->count,
                        cpi->total_ssimg_v /
cpi->count, cpi->total_ssimg_all / cpi->count, total_encode_time);
            }

            fclose(f);

#if 0
            f = fopen("qskip.stt", "a");
            fprintf(f, "minq:%d -maxq:%d skipture:skipfalse = %d:%d\n", cpi->oxcf.best_allowed_q, cpi->oxcf.worst_allowed_q, skiptruecount, skipfalsecount);
            fclose(f);
#endif

        }

#endif

#ifdef SPEEDSTATS

        if (cpi->compressor_speed == 2)
        {
            // Dump the per-speed frame-count histogram gathered in
            // real-time mode to cxspeed.stt.
            int i;
            FILE *f = fopen("cxspeed.stt", "a");
            cnt_pm /= cpi->common.MBs;

            for (i = 0; i < 16; i++)
                fprintf(f, "%5d", frames_at_speed[i]);

            fprintf(f, "\n");
            //fprintf(f, "%10d PM %10d %10d %10d EF %10d %10d %10d\n", cpi->Speed, cpi->avg_pick_mode_time, (tot_pm/cnt_pm), cnt_pm, cpi->avg_encode_time, 0, 0);
            fclose(f);
        }

#endif

#ifdef MODE_STATS
        {
            extern int count_mb_seg[4];
            // Dump intra/inter prediction-mode usage histograms to modes.stt.
            FILE *f = fopen("modes.stt", "a");
            double dr = (double)cpi->oxcf.frame_rate * (double)bytes * (double)8 / (double)count / (double)1000 ;
            fprintf(f, "intra_mode in Intra Frames:\n");
            fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1], y_modes[2], y_modes[3], y_modes[4]);
            fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1], uv_modes[2], uv_modes[3]);
            fprintf(f, "B: ");
            {
                int i;

                for (i = 0; i < 10; i++)
                    fprintf(f, "%8d, ", b_modes[i]);

                fprintf(f, "\n");
            }

            fprintf(f, "Modes in Inter Frames:\n");
            fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d\n",
                    inter_y_modes[0], inter_y_modes[1], inter_y_modes[2], inter_y_modes[3], inter_y_modes[4],
                    inter_y_modes[5], inter_y_modes[6], inter_y_modes[7], inter_y_modes[8], inter_y_modes[9]);
            fprintf(f, "UV:%8d, %8d, %8d, %8d\n", inter_uv_modes[0], inter_uv_modes[1], inter_uv_modes[2], inter_uv_modes[3]);
            fprintf(f, "B: ");
            {
                int i;

                for (i = 0; i < 15; i++)
                    fprintf(f, "%8d, ", inter_b_modes[i]);

                fprintf(f, "\n");
            }
            fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1], count_mb_seg[2], count_mb_seg[3]);
            fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4], inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4], inter_b_modes[NEW4X4]);

            fclose(f);
        }
#endif

#ifdef ENTROPY_STATS
        {
            int i, j, k;
            // Emit the gathered key-frame B-mode counts as a compilable C
            // table (modecontext.c); zero counts are written out as 1.
            FILE *fmode = fopen("modecontext.c", "w");

            fprintf(fmode, "\n#include \"entropymode.h\"\n\n");
            fprintf(fmode, "const unsigned int vp8_kf_default_bmode_counts ");
            fprintf(fmode, "[VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] =\n{\n");

            for (i = 0; i < 10; i++)
            {
                fprintf(fmode, " { //Above Mode : %d\n", i);

                for (j = 0; j < 10; j++)
                {
                    fprintf(fmode, " {");

                    for (k = 0; k < 10; k++)
                    {
                        if (!intra_mode_stats[i][j][k])
                            fprintf(fmode, " %5d, ", 1);
                        else
                            fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]);
                    }

                    fprintf(fmode, "}, // left_mode %d\n", j);
                }

                fprintf(fmode, " },\n");
            }

            fprintf(fmode, "};\n");
            fclose(fmode);
        }
#endif

#if defined(SECTIONBITS_OUTPUT)

        if (0)
        {
            int i;
            FILE *f = fopen("tokenbits.stt", "a");

            for (i = 0; i < 28; i++)
                fprintf(f, "%8d", (int)(Sectionbits[i] / 256));

            fprintf(f, "\n");
            fclose(f);
        }

#endif

#if 0
        {
            printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
            printf("\n_frames recive_data encod_mb_row compress_frame Total\n");
            printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000);
        }
#endif

    }

    // Free worker threads and every compressor-owned allocation, then the
    // compressor struct itself; the caller's handle is nulled.
    vp8cx_remove_encoder_threads(cpi);
    vp8_dealloc_compressor_data(cpi);
    vpx_free(cpi->mb.ss);
    vpx_free(cpi->tok);
    vpx_free(cpi->rdtok);
    vpx_free(cpi->cyclic_refresh_map);

    vp8_remove_common(&cpi->common);
    vpx_free(cpi);
    *ptr = 0;

#ifdef OUTPUT_YUV_SRC
    fclose(yuv_file);
#endif

#if 0
    if (keyfile)
        fclose(keyfile);

    if (framepsnr)
        fclose(framepsnr);

    if (kf_list)
        fclose(kf_list);

#endif

}

// Sum of squared differences between two planes.  16x16 interior tiles use
// the (possibly SIMD) mse16x16 hook; widths/heights that are not multiples
// of 16 are finished with scalar border loops.
static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
                                 unsigned char *recon, int recon_stride,
                                 unsigned int cols, unsigned int rows,
                                 vp8_variance_rtcd_vtable_t *rtcd)
{
    unsigned int row, col;
    uint64_t total_sse = 0;
    int diff;

    for (row = 0; row + 16 <= rows; row += 16)
    {
        for (col = 0; col + 16 <= cols; col += 16)
        {
            unsigned int sse;

            VARIANCE_INVOKE(rtcd, mse16x16)(orig + col,
orig_stride, recon + col, recon_stride, &sse); total_sse += sse; } /* Handle odd-sized width */ if (col < cols) { unsigned int border_row, border_col; unsigned char *border_orig = orig; unsigned char *border_recon = recon; for (border_row = 0; border_row < 16; border_row++) { for (border_col = col; border_col < cols; border_col++) { diff = border_orig[border_col] - border_recon[border_col]; total_sse += diff * diff; } border_orig += orig_stride; border_recon += recon_stride; } } orig += orig_stride * 16; recon += recon_stride * 16; } /* Handle odd-sized height */ for (; row < rows; row++) { for (col = 0; col < cols; col++) { diff = orig[col] - recon[col]; total_sse += diff * diff; } orig += orig_stride; recon += recon_stride; } return total_sse; } static void generate_psnr_packet(VP8_COMP *cpi) { YV12_BUFFER_CONFIG *orig = cpi->Source; YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show; struct vpx_codec_cx_pkt pkt; uint64_t sse; int i; unsigned int width = cpi->common.Width; unsigned int height = cpi->common.Height; pkt.kind = VPX_CODEC_PSNR_PKT; sse = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer, recon->y_stride, width, height, IF_RTCD(&cpi->rtcd.variance)); pkt.data.psnr.sse[0] = sse; pkt.data.psnr.sse[1] = sse; pkt.data.psnr.samples[0] = width * height; pkt.data.psnr.samples[1] = width * height; width = (width + 1) / 2; height = (height + 1) / 2; sse = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer, recon->uv_stride, width, height, IF_RTCD(&cpi->rtcd.variance)); pkt.data.psnr.sse[0] += sse; pkt.data.psnr.sse[2] = sse; pkt.data.psnr.samples[0] += width * height; pkt.data.psnr.samples[2] = width * height; sse = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer, recon->uv_stride, width, height, IF_RTCD(&cpi->rtcd.variance)); pkt.data.psnr.sse[0] += sse; pkt.data.psnr.sse[3] = sse; pkt.data.psnr.samples[0] += width * height; pkt.data.psnr.samples[3] = width * height; for (i = 0; i < 4; i++) 
pkt.data.psnr.psnr[i] = vp8_mse2psnr(pkt.data.psnr.samples[i], 255.0, pkt.data.psnr.sse[i]); vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt); } int vp8_use_as_reference(VP8_PTR ptr, int ref_frame_flags) { VP8_COMP *cpi = (VP8_COMP *)(ptr); if (ref_frame_flags > 7) return -1 ; cpi->ref_frame_flags = ref_frame_flags; return 0; } int vp8_update_reference(VP8_PTR ptr, int ref_frame_flags) { VP8_COMP *cpi = (VP8_COMP *)(ptr); if (ref_frame_flags > 7) return -1 ; cpi->common.refresh_golden_frame = 0; cpi->common.refresh_alt_ref_frame = 0; cpi->common.refresh_last_frame = 0; if (ref_frame_flags & VP8_LAST_FLAG) cpi->common.refresh_last_frame = 1; if (ref_frame_flags & VP8_GOLD_FLAG) cpi->common.refresh_golden_frame = 1; if (ref_frame_flags & VP8_ALT_FLAG) cpi->common.refresh_alt_ref_frame = 1; return 0; } int vp8_get_reference(VP8_PTR ptr, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd) { VP8_COMP *cpi = (VP8_COMP *)(ptr); VP8_COMMON *cm = &cpi->common; int ref_fb_idx; if (ref_frame_flag == VP8_LAST_FLAG) ref_fb_idx = cm->lst_fb_idx; else if (ref_frame_flag == VP8_GOLD_FLAG) ref_fb_idx = cm->gld_fb_idx; else if (ref_frame_flag == VP8_ALT_FLAG) ref_fb_idx = cm->alt_fb_idx; else return -1; vp8_yv12_copy_frame_ptr(&cm->yv12_fb[ref_fb_idx], sd); return 0; } int vp8_set_reference(VP8_PTR ptr, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd) { VP8_COMP *cpi = (VP8_COMP *)(ptr); VP8_COMMON *cm = &cpi->common; int ref_fb_idx; if (ref_frame_flag == VP8_LAST_FLAG) ref_fb_idx = cm->lst_fb_idx; else if (ref_frame_flag == VP8_GOLD_FLAG) ref_fb_idx = cm->gld_fb_idx; else if (ref_frame_flag == VP8_ALT_FLAG) ref_fb_idx = cm->alt_fb_idx; else return -1; vp8_yv12_copy_frame_ptr(sd, &cm->yv12_fb[ref_fb_idx]); return 0; } int vp8_update_entropy(VP8_PTR comp, int update) { VP8_COMP *cpi = (VP8_COMP *) comp; VP8_COMMON *cm = &cpi->common; cm->refresh_entropy_probs = update; return 0; } #if OUTPUT_YUV_SRC void vp8_write_yuv_frame(const char *name, YV12_BUFFER_CONFIG *s) { FILE 
*yuv_file = fopen(name, "ab"); unsigned char *src = s->y_buffer; int h = s->y_height; do { fwrite(src, s->y_width, 1, yuv_file); src += s->y_stride; } while (--h); src = s->u_buffer; h = s->uv_height; do { fwrite(src, s->uv_width, 1, yuv_file); src += s->uv_stride; } while (--h); src = s->v_buffer; h = s->uv_height; do { fwrite(src, s->uv_width, 1, yuv_file); src += s->uv_stride; } while (--h); fclose(yuv_file); } #endif static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) { VP8_COMMON *cm = &cpi->common; // are we resizing the image if (cm->horiz_scale != 0 || cm->vert_scale != 0) { #if CONFIG_SPATIAL_RESAMPLING int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs); int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs); int tmp_height; if (cm->vert_scale == 3) tmp_height = 9; else tmp_height = 11; Scale2Ratio(cm->horiz_scale, &hr, &hs); Scale2Ratio(cm->vert_scale, &vr, &vs); vp8_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer, tmp_height, hs, hr, vs, vr, 0); cpi->Source = &cpi->scaled_source; #endif } // we may need to copy to a buffer so we can extend the image... else if (cm->Width != cm->yv12_fb[cm->lst_fb_idx].y_width || cm->Height != cm->yv12_fb[cm->lst_fb_idx].y_height) { //vp8_yv12_copy_frame_ptr(sd, &cpi->scaled_source); #if HAVE_ARMV7 #if CONFIG_RUNTIME_CPU_DETECT if (cm->rtcd.flags & HAS_NEON) #endif { vp8_yv12_copy_src_frame_func_neon(sd, &cpi->scaled_source); } #if CONFIG_RUNTIME_CPU_DETECT else #endif #endif #if !HAVE_ARMV7 || CONFIG_RUNTIME_CPU_DETECT { vp8_yv12_copy_frame_ptr(sd, &cpi->scaled_source); } #endif cpi->Source = &cpi->scaled_source; } vp8_extend_to_multiple_of16(cpi->Source, cm->Width, cm->Height); } static void resize_key_frame(VP8_COMP *cpi) { #if CONFIG_SPATIAL_RESAMPLING VP8_COMMON *cm = &cpi->common; // Do we need to apply resampling for one pass cbr. 
// In one pass this is more limited than in two pass cbr // The test and any change is only made one per key frame sequence if (cpi->oxcf.allow_spatial_resampling && (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) { int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs); int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs); int new_width, new_height; // If we are below the resample DOWN watermark then scale down a notch. if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark * cpi->oxcf.optimal_buffer_level / 100)) { cm->horiz_scale = (cm->horiz_scale < ONETWO) ? cm->horiz_scale + 1 : ONETWO; cm->vert_scale = (cm->vert_scale < ONETWO) ? cm->vert_scale + 1 : ONETWO; } // Should we now start scaling back up else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark * cpi->oxcf.optimal_buffer_level / 100)) { cm->horiz_scale = (cm->horiz_scale > NORMAL) ? cm->horiz_scale - 1 : NORMAL; cm->vert_scale = (cm->vert_scale > NORMAL) ? cm->vert_scale - 1 : NORMAL; } // Get the new hieght and width Scale2Ratio(cm->horiz_scale, &hr, &hs); Scale2Ratio(cm->vert_scale, &vr, &vs); new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs; new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs; // If the image size has changed we need to reallocate the buffers // and resample the source image if ((cm->Width != new_width) || (cm->Height != new_height)) { cm->Width = new_width; cm->Height = new_height; vp8_alloc_compressor_data(cpi); scale_and_extend_source(cpi->un_scaled_source, cpi); } } #endif } // return of 0 means drop frame static int pick_frame_size(VP8_COMP *cpi) { VP8_COMMON *cm = &cpi->common; // First Frame is a special case if (cm->current_video_frame == 0) { #if !(CONFIG_REALTIME_ONLY) if (cpi->pass == 2) vp8_calc_auto_iframe_target_size(cpi); // 1 Pass there is no information on which to base size so use bandwidth per second * fixed fraction else #endif cpi->this_frame_target = cpi->oxcf.target_bandwidth / 2; // in error resilient mode the first frame is 
bigger since it likely contains // all the static background if (cpi->oxcf.error_resilient_mode == 1 || (cpi->compressor_speed == 2)) { cpi->this_frame_target *= 3; // 5; } // Key frame from VFW/auto-keyframe/first frame cm->frame_type = KEY_FRAME; } // Special case for forced key frames // The frame sizing here is still far from ideal for 2 pass. else if (cm->frame_flags & FRAMEFLAGS_KEY) { cm->frame_type = KEY_FRAME; resize_key_frame(cpi); vp8_calc_iframe_target_size(cpi); } else if (cm->frame_type == KEY_FRAME) { vp8_calc_auto_iframe_target_size(cpi); } else { // INTER frame: compute target frame size cm->frame_type = INTER_FRAME; vp8_calc_pframe_target_size(cpi); // Check if we're dropping the frame: if (cpi->drop_frame) { cpi->drop_frame = FALSE; cpi->drop_count++; return 0; } } // Note target_size in bits * 256 per MB cpi->target_bits_per_mb = (cpi->this_frame_target * 256) / cpi->common.MBs; return 1; } static void set_quantizer(VP8_COMP *cpi, int Q) { VP8_COMMON *cm = &cpi->common; MACROBLOCKD *mbd = &cpi->mb.e_mbd; cm->base_qindex = Q; cm->y1dc_delta_q = 0; cm->y2dc_delta_q = 0; cm->y2ac_delta_q = 0; cm->uvdc_delta_q = 0; cm->uvac_delta_q = 0; // Set Segment specific quatizers mbd->segment_feature_data[MB_LVL_ALT_Q][0] = cpi->segment_feature_data[MB_LVL_ALT_Q][0]; mbd->segment_feature_data[MB_LVL_ALT_Q][1] = cpi->segment_feature_data[MB_LVL_ALT_Q][1]; mbd->segment_feature_data[MB_LVL_ALT_Q][2] = cpi->segment_feature_data[MB_LVL_ALT_Q][2]; mbd->segment_feature_data[MB_LVL_ALT_Q][3] = cpi->segment_feature_data[MB_LVL_ALT_Q][3]; } static void update_alt_ref_frame_and_stats(VP8_COMP *cpi) { VP8_COMMON *cm = &cpi->common; // Update the golden frame buffer vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cm->yv12_fb[cm->alt_fb_idx]); // Select an interval before next GF or altref if (!cpi->auto_gold) cpi->frames_till_gf_update_due = cpi->goldfreq; if ((cpi->pass != 2) && cpi->frames_till_gf_update_due) { cpi->current_gf_interval = cpi->frames_till_gf_update_due; // 
// Set the bits per frame that we should try and recover in subsequent inter frames
        // to account for the extra GF spend... note that his does not apply for GF updates
        // that occur coincident with a key frame as the extra cost of key frames is dealt
        // with elsewhere.
        cpi->gf_overspend_bits += cpi->projected_frame_size;
        cpi->non_gf_bitrate_adjustment = cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
    }

    // Update data structure that monitors level of reference to last GF
    vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
    cpi->gf_active_count = cm->mb_rows * cm->mb_cols;

    // this frame refreshes means next frames don't unless specified by user
    cpi->common.frames_since_golden = 0;

    // Clear the alternate reference update pending flag.
    cpi->source_alt_ref_pending = FALSE;

    // Set the alternate refernce frame active flag
    cpi->source_alt_ref_active = TRUE;

}

// After a frame that refreshes the golden buffer (or while counting down to
// the next GF): maintain GF interval counters, bit-overspend accounting and
// the reference-frame usage statistics consumed by mode selection.
static void update_golden_frame_and_stats(VP8_COMP *cpi)
{
    VP8_COMMON *cm = &cpi->common;

    // Update the Golden frame reconstruction buffer if signalled and the GF usage counts.
    if (cm->refresh_golden_frame)
    {
        // Update the golden frame buffer
        vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cm->yv12_fb[cm->gld_fb_idx]);

        // Select an interval before next GF
        if (!cpi->auto_gold)
            cpi->frames_till_gf_update_due = cpi->goldfreq;

        if ((cpi->pass != 2) && (cpi->frames_till_gf_update_due > 0))
        {
            cpi->current_gf_interval = cpi->frames_till_gf_update_due;

            // Set the bits per frame that we should try and recover in subsequent inter frames
            // to account for the extra GF spend... note that his does not apply for GF updates
            // that occur coincident with a key frame as the extra cost of key frames is dealt
            // with elsewhere.
            if ((cm->frame_type != KEY_FRAME) && !cpi->source_alt_ref_active)
            {
                // Calcluate GF bits to be recovered
                // Projected size - av frame bits available for inter frames for clip as a whole
                cpi->gf_overspend_bits += (cpi->projected_frame_size - cpi->inter_frame_target);
            }

            cpi->non_gf_bitrate_adjustment = cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;

        }

        // Update data structure that monitors level of reference to last GF
        vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
        cpi->gf_active_count = cm->mb_rows * cm->mb_cols;

        // this frame refreshes means next frames don't unless specified by user
        cm->refresh_golden_frame = 0;
        cpi->common.frames_since_golden = 0;

        //if ( cm->frame_type == KEY_FRAME )
        //{
        cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
        cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
        cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
        cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
        //}
        //else
        //{
        //    // Carry a potrtion of count over to begining of next gf sequence
        //    cpi->recent_ref_frame_usage[INTRA_FRAME] >>= 5;
        //    cpi->recent_ref_frame_usage[LAST_FRAME] >>= 5;
        //    cpi->recent_ref_frame_usage[GOLDEN_FRAME] >>= 5;
        //    cpi->recent_ref_frame_usage[ALTREF_FRAME] >>= 5;
        //}

        // ******** Fixed Q test code only ************
        // If we are going to use the ALT reference for the next group of frames set a flag to say so.
        if (cpi->oxcf.fixed_q >= 0 &&
            cpi->oxcf.play_alternate && !cpi->common.refresh_alt_ref_frame)
        {
            cpi->source_alt_ref_pending = TRUE;
            cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
        }

        if (!cpi->source_alt_ref_pending)
            cpi->source_alt_ref_active = FALSE;

        // Decrement count down till next gf
        if (cpi->frames_till_gf_update_due > 0)
            cpi->frames_till_gf_update_due--;

    }
    else if (!cpi->common.refresh_alt_ref_frame)
    {
        // Decrement count down till next gf
        if (cpi->frames_till_gf_update_due > 0)
            cpi->frames_till_gf_update_due--;

        if (cpi->common.frames_till_alt_ref_frame)
            cpi->common.frames_till_alt_ref_frame --;

        cpi->common.frames_since_golden ++;

        // Accumulate per-MB reference usage once the GF is at least two
        // frames old.
        if (cpi->common.frames_since_golden > 1)
        {
            cpi->recent_ref_frame_usage[INTRA_FRAME] += cpi->count_mb_ref_frame_usage[INTRA_FRAME];
            cpi->recent_ref_frame_usage[LAST_FRAME] += cpi->count_mb_ref_frame_usage[LAST_FRAME];
            cpi->recent_ref_frame_usage[GOLDEN_FRAME] += cpi->count_mb_ref_frame_usage[GOLDEN_FRAME];
            cpi->recent_ref_frame_usage[ALTREF_FRAME] += cpi->count_mb_ref_frame_usage[ALTREF_FRAME];
        }
    }
}

// This function updates the reference frame probability estimates that
// will be used during mode selection
static void update_rd_ref_frame_probs(VP8_COMP *cpi)
{
    VP8_COMMON *cm = &cpi->common;

    // Disabled variant based on recent_ref_frame_usage; the live code in
    // the #else branch uses the per-MB counts from the last frame.
#if 0
    const int *const rfct = cpi->recent_ref_frame_usage;
    const int rf_intra = rfct[INTRA_FRAME];
    const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];

    if (cm->frame_type == KEY_FRAME)
    {
        cpi->prob_intra_coded = 255;
        cpi->prob_last_coded = 128;
        cpi->prob_gf_coded = 128;
    }
    else if (!(rf_intra + rf_inter))
    {
        // This is a trap in case this function is called with cpi->recent_ref_frame_usage[] blank.
        cpi->prob_intra_coded = 63;
        cpi->prob_last_coded = 128;
        cpi->prob_gf_coded = 128;
    }
    else
    {
        cpi->prob_intra_coded = (rf_intra * 255) / (rf_intra + rf_inter);

        if (cpi->prob_intra_coded < 1)
            cpi->prob_intra_coded = 1;

        if ((cm->frames_since_golden > 0) || cpi->source_alt_ref_active)
        {
            cpi->prob_last_coded = rf_inter ?
(rfct[LAST_FRAME] * 255) / rf_inter : 128;

            if (cpi->prob_last_coded < 1)
                cpi->prob_last_coded = 1;

            cpi->prob_gf_coded = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
                                 ? (rfct[GOLDEN_FRAME] * 255) / (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) : 128;

            if (cpi->prob_gf_coded < 1)
                cpi->prob_gf_coded = 1;
        }
    }

#else
    // Live branch: derive intra/last/golden coding probabilities from the
    // per-MB reference usage counts of the previous frame, clamped to >= 1.
    const int *const rfct = cpi->count_mb_ref_frame_usage;
    const int rf_intra = rfct[INTRA_FRAME];
    const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];

    if (cm->frame_type == KEY_FRAME)
    {
        cpi->prob_intra_coded = 255;
        cpi->prob_last_coded = 128;
        cpi->prob_gf_coded = 128;
    }
    else if (!(rf_intra + rf_inter))
    {
        // This is a trap in case this function is called with cpi->recent_ref_frame_usage[] blank.
        cpi->prob_intra_coded = 63;
        cpi->prob_last_coded = 128;
        cpi->prob_gf_coded = 128;
    }
    else
    {
        cpi->prob_intra_coded = (rf_intra * 255) / (rf_intra + rf_inter);

        if (cpi->prob_intra_coded < 1)
            cpi->prob_intra_coded = 1;

        cpi->prob_last_coded = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128;

        if (cpi->prob_last_coded < 1)
            cpi->prob_last_coded = 1;

        cpi->prob_gf_coded = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
                             ? (rfct[GOLDEN_FRAME] * 255) / (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) : 128;

        if (cpi->prob_gf_coded < 1)
            cpi->prob_gf_coded = 1;
    }

    // update reference frame costs since we can do better than what we got last frame.

    if (cpi->common.refresh_alt_ref_frame)
    {
        cpi->prob_intra_coded += 40;
        cpi->prob_last_coded = 200;
        cpi->prob_gf_coded = 1;
    }
    else if (cpi->common.frames_since_golden == 0)
    {
        cpi->prob_last_coded = 214;
        cpi->prob_gf_coded = 1;
    }
    else if (cpi->common.frames_since_golden == 1)
    {
        cpi->prob_last_coded = 192;
        cpi->prob_gf_coded = 220;
    }
    else if (cpi->source_alt_ref_active)
    {
        //int dist = cpi->common.frames_till_alt_ref_frame + cpi->common.frames_since_golden;
        cpi->prob_gf_coded -= 20;

        if (cpi->prob_gf_coded < 10)
            cpi->prob_gf_coded = 10;
    }

#endif
}

// Heuristic: should the next frame be coded as a key frame?
// 1 = key, 0 = inter
static int decide_key_frame(VP8_COMP *cpi)
{
    VP8_COMMON *cm = &cpi->common;

    int code_key_frame = FALSE;

    cpi->kf_boost = 0;

    // Very high speeds never insert heuristic key frames.
    if (cpi->Speed > 11)
        return FALSE;

    // Clear down mmx registers
    vp8_clear_system_state();  //__asm emms;

    // Fast real-time path: key off relative change in intra / prediction
    // error rather than the percent-intra statistics used below.
    if ((cpi->compressor_speed == 2) && (cpi->Speed >= 5) && (cpi->sf.RD == 0))
    {
        double change = 1.0 * abs((int)(cpi->intra_error - cpi->last_intra_error)) / (1 + cpi->last_intra_error);
        double change2 = 1.0 * abs((int)(cpi->prediction_error - cpi->last_prediction_error)) / (1 + cpi->last_prediction_error);
        double minerror = cm->MBs * 256;

#if 0

        if (10 * cpi->intra_error / (1 + cpi->prediction_error) < 15 && cpi->prediction_error > minerror
            && (change > .25 || change2 > .25))
        {
            FILE *f = fopen("intra_inter.stt", "a");

            if (cpi->prediction_error <= 0)
                cpi->prediction_error = 1;

            fprintf(f, "%d %d %d %d %14.4f\n",
                    cm->current_video_frame,
                    (int) cpi->prediction_error,
                    (int) cpi->intra_error,
                    (int)((10 * cpi->intra_error) / cpi->prediction_error),
                    change);

            fclose(f);
        }

#endif

        cpi->last_intra_error = cpi->intra_error;
        cpi->last_prediction_error = cpi->prediction_error;

        if (10 * cpi->intra_error / (1 + cpi->prediction_error) < 15 && cpi->prediction_error > minerror
            && (change > .25 || change2 > .25))
        {
            /*(change > 1.4 || change < .75)&& cpi->this_frame_percent_intra > cpi->last_frame_percent_intra + 3*/
            return TRUE;
        }

        return FALSE;

    }

    // If the following are true we might as well code a key frame
if (((cpi->this_frame_percent_intra == 100) && (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 2))) || ((cpi->this_frame_percent_intra > 95) && (cpi->this_frame_percent_intra >= (cpi->last_frame_percent_intra + 5)))) { code_key_frame = TRUE; } // in addition if the following are true and this is not a golden frame then code a key frame // Note that on golden frames there often seems to be a pop in intra useage anyway hence this // restriction is designed to prevent spurious key frames. The Intra pop needs to be investigated. else if (((cpi->this_frame_percent_intra > 60) && (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra * 2))) || ((cpi->this_frame_percent_intra > 75) && (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra * 3 / 2))) || ((cpi->this_frame_percent_intra > 90) && (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 10)))) { if (!cm->refresh_golden_frame) code_key_frame = TRUE; } return code_key_frame; } #if !(CONFIG_REALTIME_ONLY) static void Pass1Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest, unsigned int *frame_flags) { (void) size; (void) dest; (void) frame_flags; set_quantizer(cpi, 26); scale_and_extend_source(cpi->un_scaled_source, cpi); vp8_first_pass(cpi); } #endif #if 0 void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame) { // write the frame FILE *yframe; int i; char filename[255]; sprintf(filename, "cx\\y%04d.raw", this_frame); yframe = fopen(filename, "wb"); for (i = 0; i < frame->y_height; i++) fwrite(frame->y_buffer + i * frame->y_stride, frame->y_width, 1, yframe); fclose(yframe); sprintf(filename, "cx\\u%04d.raw", this_frame); yframe = fopen(filename, "wb"); for (i = 0; i < frame->uv_height; i++) fwrite(frame->u_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe); fclose(yframe); sprintf(filename, "cx\\v%04d.raw", this_frame); yframe = fopen(filename, "wb"); for (i = 0; i < frame->uv_height; i++) fwrite(frame->v_buffer + i * 
frame->uv_stride, frame->uv_width, 1, yframe);

    fclose(yframe);
}
#endif

// Main per-frame encode driver (the function continues past this chunk):
// rate control, frame-type decision, frame-drop/decimation handling and
// quantizer-range selection for a single frame.
// return of 0 means drop frame
static void encode_frame_to_data_rate
(
    VP8_COMP *cpi,
    unsigned long *size,
    unsigned char *dest,
    unsigned int *frame_flags
)
{
    int Q;
    int frame_over_shoot_limit;
    int frame_under_shoot_limit;

    int Loop = FALSE;
    int loop_count;
    int this_q;
    int last_zbin_oq;

    int q_low;
    int q_high;
    int zbin_oq_high;
    int zbin_oq_low = 0;
    int top_index;
    int bottom_index;
    VP8_COMMON *cm = &cpi->common;
    int active_worst_qchanged = FALSE;

    int overshoot_seen = FALSE;
    int undershoot_seen = FALSE;
    // Buffer-fullness thresholds (percentages of the optimal level) that
    // drive frame dropping and decimation below.
    int drop_mark = cpi->oxcf.drop_frames_water_mark * cpi->oxcf.optimal_buffer_level / 100;
    int drop_mark75 = drop_mark * 2 / 3;
    int drop_mark50 = drop_mark / 4;
    int drop_mark25 = drop_mark / 8;

    // Clear down mmx registers to allow floating point in what follows
    vp8_clear_system_state();

    // Test code for segmentation of gf/arf (0,0)
    //segmentation_test_function((VP8_PTR) cpi);

    // For an alt ref frame in 2 pass we skip the call to the second pass function that sets the target bandwidth
#if !(CONFIG_REALTIME_ONLY)

    if (cpi->pass == 2)
    {
        if (cpi->common.refresh_alt_ref_frame)
        {
            cpi->per_frame_bandwidth = cpi->gf_bits;                           // Per frame bit target for the alt ref frame
            cpi->target_bandwidth = cpi->gf_bits * cpi->output_frame_rate;      // per second target bitrate
        }
    }
    else
#endif
        cpi->per_frame_bandwidth = (int)(cpi->target_bandwidth / cpi->output_frame_rate);

    // Default turn off buffer to buffer copying
    cm->copy_buffer_to_gf = 0;
    cm->copy_buffer_to_arf = 0;

    // Clear zbin over-quant value and mode boost values.
    cpi->zbin_over_quant = 0;
    cpi->zbin_mode_boost = 0;

    // Enable mode based tweaking of the zbin
    cpi->zbin_mode_boost_enabled = TRUE;

    // Current default encoder behaviour for the altref sign bias
    if (cpi->source_alt_ref_active)
        cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
    else
        cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;

    // Check to see if a key frame is signalled
    // For two pass with auto key frame enabled cm->frame_type may already be set, but not for one pass.
    if ((cm->current_video_frame == 0) ||
        (cm->frame_flags & FRAMEFLAGS_KEY) ||
        (cpi->oxcf.auto_key && (cpi->frames_since_key % cpi->key_frame_frequency == 0)))
    {
        // Key frame from VFW/auto-keyframe/first frame
        cm->frame_type = KEY_FRAME;
    }

    // Set default state for segment and mode based loop filter update flags
    cpi->mb.e_mbd.update_mb_segmentation_map = 0;
    cpi->mb.e_mbd.update_mb_segmentation_data = 0;
    cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;

    // Set various flags etc to special state if it is a key frame
    if (cm->frame_type == KEY_FRAME)
    {
        int i;

        // Reset the loop filter deltas and segmentation map
        setup_features(cpi);

        // If segmentation is enabled force a map update for key frames
        if (cpi->mb.e_mbd.segmentation_enabled)
        {
            cpi->mb.e_mbd.update_mb_segmentation_map = 1;
            cpi->mb.e_mbd.update_mb_segmentation_data = 1;
        }

        // The alternate reference frame cannot be active for a key frame
        cpi->source_alt_ref_active = FALSE;

        // Reset the RD threshold multipliers to default of * 1 (128)
        for (i = 0; i < MAX_MODES; i++)
        {
            cpi->rd_thresh_mult[i] = 128;
        }
    }

    // Test code for segmentation
    //if ( (cm->frame_type == KEY_FRAME) || ((cm->current_video_frame % 2) == 0))
    //if ( (cm->current_video_frame % 2) == 0 )
    //  enable_segmentation((VP8_PTR)cpi);
    //else
    //  disable_segmentation((VP8_PTR)cpi);

#if 0
    // Experimental code for lagged compress and one pass
    // Initialise one_pass GF frames stats
    // Update stats used for GF selection
    //if ( cpi->pass == 0 )
    {
        cpi->one_pass_frame_index = cm->current_video_frame % MAX_LAG_BUFFERS;

        cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frames_so_far = 0;
        cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_intra_error = 0.0;
        cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_coded_error = 0.0;
        cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_inter = 0.0;
        cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_motion = 0.0;
        cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr = 0.0;
        cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr_abs = 0.0;
        cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc = 0.0;
        cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc_abs = 0.0;
    }
#endif

    update_rd_ref_frame_probs(cpi);

    if (cpi->drop_frames_allowed)
    {
        // The reset to decimation 0 is only done here for one pass.
        // Once it is set two pass leaves decimation on till the next kf.
        if ((cpi->buffer_level > drop_mark) && (cpi->decimation_factor > 0))
            cpi->decimation_factor --;

        if (cpi->buffer_level > drop_mark75 && cpi->decimation_factor > 0)
            cpi->decimation_factor = 1;

        else if (cpi->buffer_level < drop_mark25 && (cpi->decimation_factor == 2 || cpi->decimation_factor == 3))
        {
            cpi->decimation_factor = 3;
        }
        else if (cpi->buffer_level < drop_mark50 && (cpi->decimation_factor == 1 || cpi->decimation_factor == 2))
        {
            cpi->decimation_factor = 2;
        }
        else if (cpi->buffer_level < drop_mark75 && (cpi->decimation_factor == 0 || cpi->decimation_factor == 1))
        {
            cpi->decimation_factor = 1;
        }

        //vpx_log("Encoder: Decimation Factor: %d \n",cpi->decimation_factor);
    }

    // The following decimates the frame rate according to a regular pattern (i.e. to 1/2 or 2/3 frame rate)
    // This can be used to help prevent buffer under-run in CBR mode. Alternatively it might be desirable in
    // some situations to drop frame rate but throw more bits at each frame.
    //
    // Note that dropping a key frame can be problematic if spatial resampling is also active
    if (cpi->decimation_factor > 0)
    {
        switch (cpi->decimation_factor)
        {
        case 1:
            cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 3 / 2;
            break;
        case 2:
            // NOTE(review): cases 2 and 3 apply the same 5/4 scaling —
            // confirm this is intentional.
            cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
            break;
        case 3:
            cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
            break;
        }

        // Note that we should not throw out a key frame (especially when spatial resampling is enabled).
        if ((cm->frame_type == KEY_FRAME)) // && cpi->oxcf.allow_spatial_resampling )
        {
            cpi->decimation_count = cpi->decimation_factor;
        }
        else if (cpi->decimation_count > 0)
        {
            // Skip this frame entirely: bank its bandwidth and advance the
            // frame counters without encoding.
            cpi->decimation_count --;
            cpi->bits_off_target += cpi->av_per_frame_bandwidth;
            cm->current_video_frame++;
            cpi->frames_since_key++;

#if CONFIG_PSNR
            cpi->count ++;
#endif

            cpi->buffer_level = cpi->bits_off_target;

            return;
        }
        else
            cpi->decimation_count = cpi->decimation_factor;
    }

    // Decide how big to make the frame
    if (!pick_frame_size(cpi))
    {
        cm->current_video_frame++;
        cpi->frames_since_key++;
        return;
    }

    // Reduce active_worst_allowed_q for CBR if our buffer is getting too full.
    // This has a knock on effect on active best quality as well.
    // For CBR if the buffer reaches its maximum level then we can no longer
    // save up bits for later frames so we might as well use them up
    // on the current frame.
    if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
        (cpi->buffer_level >= cpi->oxcf.optimal_buffer_level) && cpi->buffered_mode)
    {
        int Adjustment = cpi->active_worst_quality / 4;       // Max adjustment is 1/4

        if (Adjustment)
        {
            int buff_lvl_step;
            // NOTE(review): tmp_lvl appears unused in this chunk; kept as-is.
            int tmp_lvl = cpi->buffer_level;

            if (cpi->buffer_level < cpi->oxcf.maximum_buffer_size)
            {
                buff_lvl_step = (cpi->oxcf.maximum_buffer_size - cpi->oxcf.optimal_buffer_level) / Adjustment;

                if (buff_lvl_step)
                {
                    Adjustment = (cpi->buffer_level - cpi->oxcf.optimal_buffer_level) / buff_lvl_step;
                    cpi->active_worst_quality -= Adjustment;
                }
            }
            else
            {
                cpi->active_worst_quality -= Adjustment;
            }
        }
    }

    // Set an active best quality and if necessary active worst quality
    if (cpi->pass == 2 || (cm->current_video_frame > 150))
    {
        int Q;
        int i;
        int bpm_target;
        //int tmp;

        vp8_clear_system_state();

        Q = cpi->active_worst_quality;

        if ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame)
        {
            if (cm->frame_type != KEY_FRAME)
            {
                if (cpi->avg_frame_qindex < cpi->active_worst_quality)
                    Q = cpi->avg_frame_qindex;

                // GF/ARF minimum quality indexed by motion boost.
                if ( cpi->gfu_boost > 1000 )
                    cpi->active_best_quality = gf_low_motion_minq[Q];
                else if ( cpi->gfu_boost < 400 )
                    cpi->active_best_quality = gf_high_motion_minq[Q];
                else
                    cpi->active_best_quality = gf_mid_motion_minq[Q];

                /*cpi->active_best_quality = gf_arf_minq[Q];
                tmp = (cpi->gfu_boost > 1000) ? 600 : cpi->gfu_boost - 400;
                //tmp = (cpi->gfu_boost > 1000) ? 600 :
                //(cpi->gfu_boost < 400) ? 0 : cpi->gfu_boost - 400;
                tmp = 128 - (tmp >> 4);
                cpi->active_best_quality = (cpi->active_best_quality * tmp)>>7;*/

            }
            // KEY FRAMES
            else
            {
                if (cpi->gfu_boost > 600)
                    cpi->active_best_quality = kf_low_motion_minq[Q];
                else
                    cpi->active_best_quality = kf_high_motion_minq[Q];
            }
        }
        else
        {
            cpi->active_best_quality = inter_minq[Q];
        }

        // If CBR and the buffer is as full then it is reasonable to allow higher quality on the frames
        // to prevent bits just going to waste.
if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) { // Note that the use of >= here elliminates the risk of a devide by 0 error in the else if clause if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size) cpi->active_best_quality = cpi->best_quality; else if (cpi->buffer_level > cpi->oxcf.optimal_buffer_level) { int Fraction = ((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) * 128) / (cpi->oxcf.maximum_buffer_size - cpi->oxcf.optimal_buffer_level); int min_qadjustment = ((cpi->active_best_quality - cpi->best_quality) * Fraction) / 128; cpi->active_best_quality -= min_qadjustment; } } } // Clip the active best and worst quality values to limits if (cpi->active_worst_quality > cpi->worst_quality) cpi->active_worst_quality = cpi->worst_quality; if (cpi->active_best_quality < cpi->best_quality) cpi->active_best_quality = cpi->best_quality; else if (cpi->active_best_quality > cpi->active_worst_quality) cpi->active_best_quality = cpi->active_worst_quality; // Determine initial Q to try Q = vp8_regulate_q(cpi, cpi->this_frame_target); last_zbin_oq = cpi->zbin_over_quant; // Set highest allowed value for Zbin over quant if (cm->frame_type == KEY_FRAME) zbin_oq_high = 0; //ZBIN_OQ_MAX/16 else if (cm->refresh_alt_ref_frame || (cm->refresh_golden_frame && !cpi->source_alt_ref_active)) zbin_oq_high = 16; else zbin_oq_high = ZBIN_OQ_MAX; // Setup background Q adjustment for error resilliant mode if (cpi->cyclic_refresh_mode_enabled) cyclic_background_refresh(cpi, Q, 0); vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, &frame_over_shoot_limit); // Limit Q range for the adaptive loop (Values not clipped to range 20-60 as in VP8). 
bottom_index = cpi->active_best_quality; top_index = cpi->active_worst_quality; vp8_save_coding_context(cpi); loop_count = 0; q_low = cpi->best_quality; q_high = cpi->worst_quality; scale_and_extend_source(cpi->un_scaled_source, cpi); #if !(CONFIG_REALTIME_ONLY) && CONFIG_POSTPROC if (cpi->oxcf.noise_sensitivity > 0) { unsigned char *src; int l = 0; switch (cpi->oxcf.noise_sensitivity) { case 1: l = 20; break; case 2: l = 40; break; case 3: l = 60; break; case 4: l = 80; break; case 5: l = 100; break; case 6: l = 150; break; } if (cm->frame_type == KEY_FRAME) { vp8_de_noise(cpi->Source, cpi->Source, l , 1, 0, RTCD(postproc)); cpi->ppi.frame = 0; } else { vp8_de_noise(cpi->Source, cpi->Source, l , 1, 0, RTCD(postproc)); src = cpi->Source->y_buffer; if (cpi->Source->y_stride < 0) { src += cpi->Source->y_stride * (cpi->Source->y_height - 1); } //temp_filter(&cpi->ppi,src,src, // cm->last_frame.y_width * cm->last_frame.y_height, // cpi->oxcf.noise_sensitivity); } } #endif #ifdef OUTPUT_YUV_SRC vp8_write_yuv_frame(cpi->Source); #endif do { vp8_clear_system_state(); //__asm emms; /* if(cpi->is_src_frame_alt_ref) Q = 127; */ set_quantizer(cpi, Q); this_q = Q; // setup skip prob for costing in mode/mv decision if (cpi->common.mb_no_coeff_skip) { cpi->prob_skip_false = cpi->base_skip_false_prob[Q]; if (cm->frame_type != KEY_FRAME) { if (cpi->common.refresh_alt_ref_frame) { if (cpi->last_skip_false_probs[2] != 0) cpi->prob_skip_false = cpi->last_skip_false_probs[2]; /* if(cpi->last_skip_false_probs[2]!=0 && abs(Q- cpi->last_skip_probs_q[2])<=16 ) cpi->prob_skip_false = cpi->last_skip_false_probs[2]; else if (cpi->last_skip_false_probs[2]!=0) cpi->prob_skip_false = (cpi->last_skip_false_probs[2] + cpi->prob_skip_false ) / 2; */ } else if (cpi->common.refresh_golden_frame) { if (cpi->last_skip_false_probs[1] != 0) cpi->prob_skip_false = cpi->last_skip_false_probs[1]; /* if(cpi->last_skip_false_probs[1]!=0 && abs(Q- cpi->last_skip_probs_q[1])<=16 ) cpi->prob_skip_false = 
cpi->last_skip_false_probs[1]; else if (cpi->last_skip_false_probs[1]!=0) cpi->prob_skip_false = (cpi->last_skip_false_probs[1] + cpi->prob_skip_false ) / 2; */ } else { if (cpi->last_skip_false_probs[0] != 0) cpi->prob_skip_false = cpi->last_skip_false_probs[0]; /* if(cpi->last_skip_false_probs[0]!=0 && abs(Q- cpi->last_skip_probs_q[0])<=16 ) cpi->prob_skip_false = cpi->last_skip_false_probs[0]; else if(cpi->last_skip_false_probs[0]!=0) cpi->prob_skip_false = (cpi->last_skip_false_probs[0] + cpi->prob_skip_false ) / 2; */ } //as this is for cost estimate, let's make sure it does not go extreme eitehr way if (cpi->prob_skip_false < 5) cpi->prob_skip_false = 5; if (cpi->prob_skip_false > 250) cpi->prob_skip_false = 250; if (cpi->is_src_frame_alt_ref) cpi->prob_skip_false = 1; } #if 0 if (cpi->pass != 1) { FILE *f = fopen("skip.stt", "a"); fprintf(f, "%d, %d, %4d ", cpi->common.refresh_golden_frame, cpi->common.refresh_alt_ref_frame, cpi->prob_skip_false); fclose(f); } #endif } if (cm->frame_type == KEY_FRAME) vp8_setup_key_frame(cpi); // transform / motion compensation build reconstruction frame vp8_encode_frame(cpi); cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi); cpi->projected_frame_size = (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0; vp8_clear_system_state(); //__asm emms; // Test to see if the stats generated for this frame indicate that we should have coded a key frame // (assuming that we didn't)! 
if (cpi->pass != 2 && cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME) { if (decide_key_frame(cpi)) { vp8_calc_auto_iframe_target_size(cpi); // Reset all our sizing numbers and recode cm->frame_type = KEY_FRAME; // Clear the Alt reference frame active flag when we have a key frame cpi->source_alt_ref_active = FALSE; // Reset the loop filter deltas and segmentation map setup_features(cpi); // If segmentation is enabled force a map update for key frames if (cpi->mb.e_mbd.segmentation_enabled) { cpi->mb.e_mbd.update_mb_segmentation_map = 1; cpi->mb.e_mbd.update_mb_segmentation_data = 1; } vp8_restore_coding_context(cpi); Q = vp8_regulate_q(cpi, cpi->this_frame_target); q_low = cpi->best_quality; q_high = cpi->worst_quality; vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, &frame_over_shoot_limit); // Limit Q range for the adaptive loop (Values not clipped to range 20-60 as in VP8). bottom_index = cpi->active_best_quality; top_index = cpi->active_worst_quality; loop_count++; Loop = TRUE; resize_key_frame(cpi); continue; } } vp8_clear_system_state(); if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1; // Are we are overshooting and up against the limit of active max Q. if (((cpi->pass != 2) || (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) && (Q == cpi->active_worst_quality) && (cpi->active_worst_quality < cpi->worst_quality) && (cpi->projected_frame_size > frame_over_shoot_limit)) { int over_size_percent = ((cpi->projected_frame_size - frame_over_shoot_limit) * 100) / frame_over_shoot_limit; // If so is there any scope for relaxing it while ((cpi->active_worst_quality < cpi->worst_quality) && (over_size_percent > 0)) { cpi->active_worst_quality++; top_index = cpi->active_worst_quality; over_size_percent = (int)(over_size_percent * 0.96); // Assume 1 qstep = about 4% on frame size. } // If we have updated the active max Q do not call vp8_update_rate_correction_factors() this loop. 
active_worst_qchanged = TRUE; } else active_worst_qchanged = FALSE; #if !(CONFIG_REALTIME_ONLY) // Is the projected frame size out of range and are we allowed to attempt to recode. if (((cpi->sf.recode_loop == 1) || ((cpi->sf.recode_loop == 2) && (cm->refresh_golden_frame || (cm->frame_type == KEY_FRAME)))) && (((cpi->projected_frame_size > frame_over_shoot_limit) && (Q < top_index)) || //((cpi->projected_frame_size > frame_over_shoot_limit ) && (Q == top_index) && (cpi->zbin_over_quant < ZBIN_OQ_MAX)) || ((cpi->projected_frame_size < frame_under_shoot_limit) && (Q > bottom_index))) ) { int last_q = Q; int Retries = 0; // Frame size out of permitted range: // Update correction factor & compute new Q to try... if (cpi->projected_frame_size > frame_over_shoot_limit) { //if ( cpi->zbin_over_quant == 0 ) q_low = (Q < q_high) ? (Q + 1) : q_high; // Raise Qlow as to at least the current value if (cpi->zbin_over_quant > 0) // If we are using over quant do the same for zbin_oq_low zbin_oq_low = (cpi->zbin_over_quant < zbin_oq_high) ? (cpi->zbin_over_quant + 1) : zbin_oq_high; //if ( undershoot_seen || (Q == MAXQ) ) if (undershoot_seen) { // Update rate_correction_factor unless cpi->active_worst_quality has changed. if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 1); Q = (q_high + q_low + 1) / 2; // Adjust cpi->zbin_over_quant (only allowed when Q is max) if (Q < MAXQ) cpi->zbin_over_quant = 0; else { zbin_oq_low = (cpi->zbin_over_quant < zbin_oq_high) ? (cpi->zbin_over_quant + 1) : zbin_oq_high; cpi->zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2; } } else { // Update rate_correction_factor unless cpi->active_worst_quality has changed. 
if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 0); Q = vp8_regulate_q(cpi, cpi->this_frame_target); while (((Q < q_low) || (cpi->zbin_over_quant < zbin_oq_low)) && (Retries < 10)) { vp8_update_rate_correction_factors(cpi, 0); Q = vp8_regulate_q(cpi, cpi->this_frame_target); Retries ++; } } overshoot_seen = TRUE; } else { if (cpi->zbin_over_quant == 0) q_high = (Q > q_low) ? (Q - 1) : q_low; // Lower q_high if not using over quant else // else lower zbin_oq_high zbin_oq_high = (cpi->zbin_over_quant > zbin_oq_low) ? (cpi->zbin_over_quant - 1) : zbin_oq_low; if (overshoot_seen) { // Update rate_correction_factor unless cpi->active_worst_quality has changed. if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 1); Q = (q_high + q_low) / 2; // Adjust cpi->zbin_over_quant (only allowed when Q is max) if (Q < MAXQ) cpi->zbin_over_quant = 0; else cpi->zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2; } else { // Update rate_correction_factor unless cpi->active_worst_quality has changed. if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 0); Q = vp8_regulate_q(cpi, cpi->this_frame_target); while (((Q > q_high) || (cpi->zbin_over_quant > zbin_oq_high)) && (Retries < 10)) { vp8_update_rate_correction_factors(cpi, 0); Q = vp8_regulate_q(cpi, cpi->this_frame_target); Retries ++; } } undershoot_seen = TRUE; } // Clamp Q to upper and lower limits: if (Q > q_high) Q = q_high; else if (Q < q_low) Q = q_low; // Clamp cpi->zbin_over_quant cpi->zbin_over_quant = (cpi->zbin_over_quant < zbin_oq_low) ? zbin_oq_low : (cpi->zbin_over_quant > zbin_oq_high) ? zbin_oq_high : cpi->zbin_over_quant; //Loop = ((Q != last_q) || (last_zbin_oq != cpi->zbin_over_quant)) ? TRUE : FALSE; Loop = ((Q != last_q)) ? 
TRUE : FALSE; last_zbin_oq = cpi->zbin_over_quant; } else #endif Loop = FALSE; if (cpi->is_src_frame_alt_ref) Loop = FALSE; if (Loop == TRUE) { vp8_restore_coding_context(cpi); loop_count++; #if CONFIG_PSNR cpi->tot_recode_hits++; #endif } } while (Loop == TRUE); #if 0 // Experimental code for lagged and one pass // Update stats used for one pass GF selection { /* int frames_so_far; double frame_intra_error; double frame_coded_error; double frame_pcnt_inter; double frame_pcnt_motion; double frame_mvr; double frame_mvr_abs; double frame_mvc; double frame_mvc_abs; */ cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_coded_error = (double)cpi->prediction_error; cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_intra_error = (double)cpi->intra_error; cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_pcnt_inter = (double)(100 - cpi->this_frame_percent_intra) / 100.0; } #endif // Update the GF useage maps. // This is done after completing the compression of a frame when all modes etc. are finalized but before loop filter vp8_update_gf_useage_maps(cpi, cm, &cpi->mb); if (cm->frame_type == KEY_FRAME) cm->refresh_last_frame = 1; #if 0 { FILE *f = fopen("gfactive.stt", "a"); fprintf(f, "%8d %8d %8d %8d %8d\n", cm->current_video_frame, (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols), cpi->this_iiratio, cpi->next_iiratio, cm->refresh_golden_frame); fclose(f); } #endif // For inter frames the current default behaviour is that when cm->refresh_golden_frame is set we copy the old GF over to the ARF buffer // This is purely an encoder descision at present. 
if (!cpi->oxcf.error_resilient_mode && cm->refresh_golden_frame) cm->copy_buffer_to_arf = 2; else cm->copy_buffer_to_arf = 0; if (cm->refresh_last_frame) { vp8_swap_yv12_buffer(&cm->yv12_fb[cm->lst_fb_idx], &cm->yv12_fb[cm->new_fb_idx]); cm->frame_to_show = &cm->yv12_fb[cm->lst_fb_idx]; } else cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx]; //#pragma omp parallel sections { //#pragma omp section { struct vpx_usec_timer timer; vpx_usec_timer_start(&timer); if (cpi->sf.auto_filter == 0) vp8cx_pick_filter_level_fast(cpi->Source, cpi); else vp8cx_pick_filter_level(cpi->Source, cpi); vpx_usec_timer_mark(&timer); cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer); if (cm->no_lpf) cm->filter_level = 0; if (cm->filter_level > 0) { vp8cx_set_alt_lf_level(cpi, cm->filter_level); vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, cm->filter_level); cm->last_frame_type = cm->frame_type; cm->last_filter_type = cm->filter_type; cm->last_sharpness_level = cm->sharpness_level; } vp8_yv12_extend_frame_borders_ptr(cm->frame_to_show); if (cpi->oxcf.error_resilient_mode == 1) { cm->refresh_entropy_probs = 0; } } //#pragma omp section { // build the bitstream vp8_pack_bitstream(cpi, dest, size); } } { YV12_BUFFER_CONFIG *lst_yv12 = &cm->yv12_fb[cm->lst_fb_idx]; YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx]; YV12_BUFFER_CONFIG *gld_yv12 = &cm->yv12_fb[cm->gld_fb_idx]; YV12_BUFFER_CONFIG *alt_yv12 = &cm->yv12_fb[cm->alt_fb_idx]; // At this point the new frame has been encoded coded. // If any buffer copy / swaping is signalled it should be done here. if (cm->frame_type == KEY_FRAME) { vp8_yv12_copy_frame_ptr(cm->frame_to_show, gld_yv12); vp8_yv12_copy_frame_ptr(cm->frame_to_show, alt_yv12); } else // For non key frames { // Code to copy between reference buffers if (cm->copy_buffer_to_arf) { if (cm->copy_buffer_to_arf == 1) { if (cm->refresh_last_frame) // We copy new_frame here because last and new buffers will already have been swapped if cm->refresh_last_frame is set. 
vp8_yv12_copy_frame_ptr(new_yv12, alt_yv12); else vp8_yv12_copy_frame_ptr(lst_yv12, alt_yv12); } else if (cm->copy_buffer_to_arf == 2) vp8_yv12_copy_frame_ptr(gld_yv12, alt_yv12); } if (cm->copy_buffer_to_gf) { if (cm->copy_buffer_to_gf == 1) { if (cm->refresh_last_frame) // We copy new_frame here because last and new buffers will already have been swapped if cm->refresh_last_frame is set. vp8_yv12_copy_frame_ptr(new_yv12, gld_yv12); else vp8_yv12_copy_frame_ptr(lst_yv12, gld_yv12); } else if (cm->copy_buffer_to_gf == 2) vp8_yv12_copy_frame_ptr(alt_yv12, gld_yv12); } } } // Update rate control heuristics cpi->total_byte_count += (*size); cpi->projected_frame_size = (*size) << 3; if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 2); cpi->last_q[cm->frame_type] = cm->base_qindex; if (cm->frame_type == KEY_FRAME) { vp8_adjust_key_frame_context(cpi); } // Keep a record of ambient average Q. if (cm->frame_type == KEY_FRAME) cpi->avg_frame_qindex = cm->base_qindex; else cpi->avg_frame_qindex = (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2; // Keep a record from which we can calculate the average Q excluding GF updates and key frames if ((cm->frame_type != KEY_FRAME) && !cm->refresh_golden_frame && !cm->refresh_alt_ref_frame) { cpi->ni_frames++; // Calculate the average Q for normal inter frames (not key or GFU frames) // This is used as a basis for setting active worst quality. if (cpi->ni_frames > 150) { cpi->ni_tot_qi += Q; cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames); } // Early in the clip ... average the current frame Q value with the default // entered by the user as a dampening measure else { cpi->ni_tot_qi += Q; cpi->ni_av_qi = ((cpi->ni_tot_qi / cpi->ni_frames) + cpi->worst_quality + 1) / 2; } // If the average Q is higher than what was used in the last frame // (after going through the recode loop to keep the frame size within range) // then use the last frame value - 1. 
// The -1 is designed to stop Q and hence the data rate, from progressively // falling away during difficult sections, but at the same time reduce the number of // itterations around the recode loop. if (Q > cpi->ni_av_qi) cpi->ni_av_qi = Q - 1; } #if 0 // If the frame was massively oversize and we are below optimal buffer level drop next frame if ((cpi->drop_frames_allowed) && (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) && (cpi->buffer_level < cpi->oxcf.drop_frames_water_mark * cpi->oxcf.optimal_buffer_level / 100) && (cpi->projected_frame_size > (4 * cpi->this_frame_target))) { cpi->drop_frame = TRUE; } #endif // Set the count for maximum consequative dropped frames based upon the ratio of // this frame size to the target average per frame bandwidth. // (cpi->av_per_frame_bandwidth > 0) is just a sanity check to prevent / 0. if (cpi->drop_frames_allowed && (cpi->av_per_frame_bandwidth > 0)) { cpi->max_drop_count = cpi->projected_frame_size / cpi->av_per_frame_bandwidth; if (cpi->max_drop_count > cpi->max_consec_dropped_frames) cpi->max_drop_count = cpi->max_consec_dropped_frames; } // Update the buffer level variable. if (cpi->common.refresh_alt_ref_frame) cpi->bits_off_target -= cpi->projected_frame_size; else cpi->bits_off_target += cpi->av_per_frame_bandwidth - cpi->projected_frame_size; // Rolling monitors of whether we are over or underspending used to help regulate min and Max Q in two pass. 
cpi->rolling_target_bits = ((cpi->rolling_target_bits * 3) + cpi->this_frame_target + 2) / 4; cpi->rolling_actual_bits = ((cpi->rolling_actual_bits * 3) + cpi->projected_frame_size + 2) / 4; cpi->long_rolling_target_bits = ((cpi->long_rolling_target_bits * 31) + cpi->this_frame_target + 16) / 32; cpi->long_rolling_actual_bits = ((cpi->long_rolling_actual_bits * 31) + cpi->projected_frame_size + 16) / 32; // Actual bits spent cpi->total_actual_bits += cpi->projected_frame_size; // Debug stats cpi->total_target_vs_actual += (cpi->this_frame_target - cpi->projected_frame_size); cpi->buffer_level = cpi->bits_off_target; // Update bits left to the kf and gf groups to account for overshoot or undershoot on these frames if (cm->frame_type == KEY_FRAME) { cpi->kf_group_bits += cpi->this_frame_target - cpi->projected_frame_size; if (cpi->kf_group_bits < 0) cpi->kf_group_bits = 0 ; } else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame) { cpi->gf_group_bits += cpi->this_frame_target - cpi->projected_frame_size; if (cpi->gf_group_bits < 0) cpi->gf_group_bits = 0 ; } if (cm->frame_type != KEY_FRAME) { if (cpi->common.refresh_alt_ref_frame) { cpi->last_skip_false_probs[2] = cpi->prob_skip_false; cpi->last_skip_probs_q[2] = cm->base_qindex; } else if (cpi->common.refresh_golden_frame) { cpi->last_skip_false_probs[1] = cpi->prob_skip_false; cpi->last_skip_probs_q[1] = cm->base_qindex; } else { cpi->last_skip_false_probs[0] = cpi->prob_skip_false; cpi->last_skip_probs_q[0] = cm->base_qindex; //update the baseline cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false; } } #if 0 && CONFIG_PSNR { FILE *f = fopen("tmp.stt", "a"); vp8_clear_system_state(); //__asm emms; if (cpi->total_coded_error_left != 0.0) fprintf(f, "%10d %10d %10d %10d %10d %10d %10d %10d %6ld %6ld" "%6ld %6ld %5ld %5ld %5ld %8ld %8.2f %10d %10.3f" "%10.3f %8ld\n", cpi->common.current_video_frame, cpi->this_frame_target, cpi->projected_frame_size, (cpi->projected_frame_size - 
cpi->this_frame_target), (int)cpi->total_target_vs_actual, (cpi->oxcf.starting_buffer_level-cpi->bits_off_target), (int)cpi->total_actual_bits, cm->base_qindex, cpi->active_best_quality, cpi->active_worst_quality, cpi->avg_frame_qindex, cpi->zbin_over_quant, cm->refresh_golden_frame, cm->refresh_alt_ref_frame, cm->frame_type, cpi->gfu_boost, cpi->est_max_qcorrection_factor, (int)cpi->bits_left, cpi->total_coded_error_left, (double)cpi->bits_left / cpi->total_coded_error_left, cpi->tot_recode_hits); else fprintf(f, "%10d %10d %10d %10d %10d %10d %10d %10d %6ld %6ld" "%6ld %6ld %5ld %5ld %5ld %8ld %8.2f %10d %10.3f" "%8ld\n", cpi->common.current_video_frame, cpi->this_frame_target, cpi->projected_frame_size, (cpi->projected_frame_size - cpi->this_frame_target), (int)cpi->total_target_vs_actual, (cpi->oxcf.starting_buffer_level-cpi->bits_off_target), (int)cpi->total_actual_bits, cm->base_qindex, cpi->active_best_quality, cpi->active_worst_quality, cpi->avg_frame_qindex, cpi->zbin_over_quant, cm->refresh_golden_frame, cm->refresh_alt_ref_frame, cm->frame_type, cpi->gfu_boost, cpi->est_max_qcorrection_factor, (int)cpi->bits_left, cpi->total_coded_error_left, cpi->tot_recode_hits); fclose(f); { FILE *fmodes = fopen("Modes.stt", "a"); int i; fprintf(fmodes, "%6d:%1d:%1d:%1d ", cpi->common.current_video_frame, cm->frame_type, cm->refresh_golden_frame, cm->refresh_alt_ref_frame); for (i = 0; i < MAX_MODES; i++) fprintf(fmodes, "%5d ", cpi->mode_chosen_counts[i]); fprintf(fmodes, "\n"); fclose(fmodes); } } #endif // If this was a kf or Gf note the Q if ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame || cm->refresh_alt_ref_frame) cm->last_kf_gf_q = cm->base_qindex; if (cm->refresh_golden_frame == 1) cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN; else cm->frame_flags = cm->frame_flags&~FRAMEFLAGS_GOLDEN; if (cm->refresh_alt_ref_frame == 1) cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF; else cm->frame_flags = cm->frame_flags&~FRAMEFLAGS_ALTREF; if 
(cm->refresh_last_frame & cm->refresh_golden_frame) // both refreshed cpi->gold_is_last = 1; else if (cm->refresh_last_frame ^ cm->refresh_golden_frame) // 1 refreshed but not the other cpi->gold_is_last = 0; if (cm->refresh_last_frame & cm->refresh_alt_ref_frame) // both refreshed cpi->alt_is_last = 1; else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame) // 1 refreshed but not the other cpi->alt_is_last = 0; if (cm->refresh_alt_ref_frame & cm->refresh_golden_frame) // both refreshed cpi->gold_is_alt = 1; else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame) // 1 refreshed but not the other cpi->gold_is_alt = 0; cpi->ref_frame_flags = VP8_ALT_FLAG | VP8_GOLD_FLAG | VP8_LAST_FLAG; if (cpi->gold_is_last) cpi->ref_frame_flags &= ~VP8_GOLD_FLAG; if (cpi->alt_is_last) cpi->ref_frame_flags &= ~VP8_ALT_FLAG; if (cpi->gold_is_alt) cpi->ref_frame_flags &= ~VP8_ALT_FLAG; if (cpi->oxcf.error_resilient_mode) { // Is this an alternate reference update if (cpi->common.refresh_alt_ref_frame) vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cm->yv12_fb[cm->alt_fb_idx]); if (cpi->common.refresh_golden_frame) vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cm->yv12_fb[cm->gld_fb_idx]); } else { if (cpi->oxcf.play_alternate && cpi->common.refresh_alt_ref_frame) // Update the alternate reference frame and stats as appropriate. update_alt_ref_frame_and_stats(cpi); else // Update the Golden frame and golden frame and stats as appropriate. update_golden_frame_and_stats(cpi); } if (cm->frame_type == KEY_FRAME) { // Tell the caller that the frame was coded as a key frame *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY; // As this frame is a key frame the next defaults to an inter frame. cm->frame_type = INTER_FRAME; cpi->last_frame_percent_intra = 100; } else { *frame_flags = cm->frame_flags&~FRAMEFLAGS_KEY; cpi->last_frame_percent_intra = cpi->this_frame_percent_intra; } // Clear the one shot update flags for segmentation map and mode/ref loop filter deltas. 
    cpi->mb.e_mbd.update_mb_segmentation_map = 0;
    cpi->mb.e_mbd.update_mb_segmentation_data = 0;
    cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;

    // Dont increment frame counters if this was an altref buffer update not a real frame
    // (show_frame is clear for invisible alt-ref updates)
    if (cm->show_frame)
    {
        cm->current_video_frame++;
        cpi->frames_since_key++;
    }

    // reset to normal state now that we are done.

#if 0
    // Debug-only: dump the reconstructed last-frame buffer to enc%04d.yuv
    {
        char filename[512];
        FILE *recon_file;

        sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);

        recon_file = fopen(filename, "wb");

        fwrite(cm->yv12_fb[cm->lst_fb_idx].buffer_alloc,
               cm->yv12_fb[cm->lst_fb_idx].frame_size, 1, recon_file);
        fclose(recon_file);
    }
#endif

    // DEBUG
    //vp8_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show);
}

// Public query: returns the one-shot "a golden-frame refresh is recommended"
// flag set by vp8_check_gf_quality(), and clears it so each recommendation is
// reported to the caller at most once.
// \param ptr     opaque VP8_PTR handle, actually a VP8_COMP*
// \return        1 if a GF update was recommended since the last call, else 0
int vp8_is_gf_update_needed(VP8_PTR ptr)
{
    VP8_COMP *cpi = (VP8_COMP *) ptr;
    int ret_val;

    ret_val = cpi->gf_update_recommended;
    cpi->gf_update_recommended = 0;

    return ret_val;
}

// Heuristic monitor of golden-frame usefulness. Computes, as percentages of
// the total macroblock count:
//   gf_active_pct      - MBs marked active in the GF usage map
//   gf_ref_usage_pct   - MBs that actually referenced GOLDEN_FRAME
//   last_ref_zz_useage - MBs coded as inter zero-motion ("zero zero") blocks
// When GF usage has been low for a stable run of frames (gf_bad_count >= 8)
// but the background is steady (zz usage >= 25%), it raises the
// gf_update_recommended flag read by vp8_is_gf_update_needed(); it also
// cancels a pending recommendation if the background later proves unstable.
void vp8_check_gf_quality(VP8_COMP *cpi)
{
    VP8_COMMON *cm = &cpi->common;
    int gf_active_pct = (100 * cpi->gf_active_count) / (cm->mb_rows * cm->mb_cols);
    int gf_ref_usage_pct = (cpi->count_mb_ref_frame_usage[GOLDEN_FRAME] * 100) / (cm->mb_rows * cm->mb_cols);
    int last_ref_zz_useage = (cpi->inter_zz_count * 100) / (cm->mb_rows * cm->mb_cols);

    // Gf refresh is not currently being signalled
    if (cpi->gf_update_recommended == 0)
    {
        // Only consider a new recommendation once the GF is reasonably old
        if (cpi->common.frames_since_golden > 7)
        {
            // Low use of gf
            if ((gf_active_pct < 10) || ((gf_active_pct + gf_ref_usage_pct) < 15))
            {
                // ...but last frame zero zero usage is reasonbable so a new gf might be appropriate
                if (last_ref_zz_useage >= 25)
                {
                    cpi->gf_bad_count ++;

                    if (cpi->gf_bad_count >= 8)   // Check that the condition is stable
                    {
                        cpi->gf_update_recommended = 1;
                        cpi->gf_bad_count = 0;
                    }
                }
                else
                    cpi->gf_bad_count = 0;        // Restart count as the background is not stable enough
            }
            else
                cpi->gf_bad_count = 0;            // Gf useage has picked up so reset count
        }
    }
    // If the signal is set but has not been read should we cancel it.
    else if (last_ref_zz_useage < 15)
    {
        cpi->gf_update_recommended = 0;
        cpi->gf_bad_count = 0;
    }

#if 0
    // Debug-only: append GF-recommendation stats to gfneeded.stt
    {
        FILE *f = fopen("gfneeded.stt", "a");
        fprintf(f, "%10d %10d %10d %10d %10ld \n",
                cm->current_video_frame,
                cpi->common.frames_since_golden,
                gf_active_pct, gf_ref_usage_pct,
                cpi->gf_update_recommended);
        fclose(f);
    }
#endif
}

#if !(CONFIG_REALTIME_ONLY)
// Second-pass encode of one frame: run the two-pass rate-control stats update
// (skipped for alt-ref updates), encode the frame, and charge the emitted bits
// against the bits_left budget. For real (non-alt-ref) frames, credit back the
// per-frame VBR minimum-rate allowance so the budget reflects net spend.
// \param cpi          encoder instance
// \param size         [out] compressed frame size in bytes (set by encode)
// \param dest         [out] compressed bitstream destination buffer
// \param frame_flags  [in/out] frame flags passed through to the encode
static void Pass2Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest, unsigned int *frame_flags)
{
    if (!cpi->common.refresh_alt_ref_frame)
        vp8_second_pass(cpi);

    encode_frame_to_data_rate(cpi, size, dest, frame_flags);
    cpi->bits_left -= 8 * *size;

    if (!cpi->common.refresh_alt_ref_frame)
    {
        double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth
                                            *cpi->oxcf.two_pass_vbrmin_section / 100);
        cpi->bits_left += (long long)(two_pass_min_rate / cpi->oxcf.frame_rate);
    }
}
#endif

//For ARM NEON, d8-d15 are callee-saved registers, and need to be saved by us.
#if HAVE_ARMV7
extern void vp8_push_neon(INT64 *store);
extern void vp8_pop_neon(INT64 *store);
#endif

// Accept one raw (uncompressed) source frame from the application and copy it
// into the encoder's source buffer queue for later compression.
// \param ptr          opaque VP8_PTR handle, actually a VP8_COMP*
// \param frame_flags  caller-supplied flags stored with the buffered frame
// \param sd           raw YV12 source frame to copy
// \param time_stamp   presentation start time for this frame
// \param end_time     presentation end time for this frame
// \return             0 on success, -1 if cpi is NULL or the lag buffer is full
int vp8_receive_raw_frame(VP8_PTR ptr, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, INT64 time_stamp, INT64 end_time)
{
    INT64 store_reg[8];
    VP8_COMP *cpi = (VP8_COMP *) ptr;
    VP8_COMMON *cm = &cpi->common;
    struct vpx_usec_timer timer;

    if (!cpi)
        return -1;

    // Preserve NEON callee-saved registers d8-d15 around the copy work
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
    if (cm->rtcd.flags & HAS_NEON)
#endif
    {
        vp8_push_neon(store_reg);
    }
#endif

    vpx_usec_timer_start(&timer);

    // no more room for frames;
    if (cpi->source_buffer_count != 0
        && cpi->source_buffer_count >= cpi->oxcf.lag_in_frames)
    {
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
        if (cm->rtcd.flags & HAS_NEON)
#endif
        {
            vp8_pop_neon(store_reg);
        }
#endif
        return -1;
    }

    //printf("in-cpi->source_buffer_count: %d\n", cpi->source_buffer_count);

    cm->clr_type = sd->clrtype;

    // make a copy of the frame for use later...
// --- vp8_receive_raw_frame() continued: copy the source into the lag buffer.
#if !(CONFIG_REALTIME_ONLY)

    if (cpi->oxcf.allow_lag)
    {
        // Lagged mode: write into the circular src_buffer.  The slot is
        // normally the one before source_encode_index (with wrap-around),
        // but while the buffer is still filling it is the next free slot.
        int which_buffer =  cpi->source_encode_index - 1;
        SOURCE_SAMPLE *s;

        if (which_buffer == -1)
            which_buffer = cpi->oxcf.lag_in_frames - 1;

        if (cpi->source_buffer_count < cpi->oxcf.lag_in_frames - 1)
            which_buffer = cpi->source_buffer_count;

        s = &cpi->src_buffer[which_buffer];

        s->source_time_stamp = time_stamp;
        s->source_end_time_stamp = end_time;
        s->source_frame_flags = frame_flags;
        vp8_yv12_copy_frame_ptr(sd, &s->source_buffer);

        cpi->source_buffer_count ++;
    }
    else
#endif
    {
        // No lag: single-slot buffer, always slot 0.
        SOURCE_SAMPLE *s;

        s = &cpi->src_buffer[0];
        s->source_end_time_stamp = end_time;
        s->source_time_stamp = time_stamp;
        s->source_frame_flags = frame_flags;

#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
        if (cm->rtcd.flags & HAS_NEON)
#endif
        {
            // NEON-accelerated frame copy when available.
            vp8_yv12_copy_src_frame_func_neon(sd, &s->source_buffer);
        }

#if CONFIG_RUNTIME_CPU_DETECT
        else
#endif
#endif
#if !HAVE_ARMV7 || CONFIG_RUNTIME_CPU_DETECT
        {
            vp8_yv12_copy_frame_ptr(sd, &s->source_buffer);
        }
#endif

        cpi->source_buffer_count = 1;
    }

    vpx_usec_timer_mark(&timer);
    cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);

#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
    if (cm->rtcd.flags & HAS_NEON)
#endif
    {
        // Restore NEON callee-saved registers before returning.
        vp8_pop_neon(store_reg);
    }
#endif

    return 0;
}

// Produce one compressed frame (or an alt-ref buffer update).  Pulls the
// next source from the lag buffer, selects frame type / reference refresh
// policy, runs the configured encoding pass, then gathers timing and
// (optionally) PSNR/SSIM statistics.  Returns 0 on success, -1 when no
// source frame is available.  Body continues on the following lines.
int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, INT64 *time_stamp, INT64 *time_end, int flush)
{
    INT64 store_reg[8];
    VP8_COMP *cpi = (VP8_COMP *) ptr;
    VP8_COMMON *cm = &cpi->common;
    struct vpx_usec_timer  tsctimer;
    struct vpx_usec_timer  ticktimer;
    struct vpx_usec_timer  cmptimer;

    if (!cpi)
        return -1;

#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
    if (cm->rtcd.flags & HAS_NEON)
#endif
    {
        vp8_push_neon(store_reg);
    }
#endif

    vpx_usec_timer_start(&cmptimer);

    // flush variable tells us that even though we have less than 10 frames
    // in our buffer we need to start producing compressed frames.
    // Probably because we are at the end of a file....
if ((cpi->source_buffer_count == cpi->oxcf.lag_in_frames && cpi->oxcf.lag_in_frames > 0) || (!cpi->oxcf.allow_lag && cpi->source_buffer_count > 0) || (flush && cpi->source_buffer_count > 0)) { SOURCE_SAMPLE *s; s = &cpi->src_buffer[cpi->source_encode_index]; cpi->source_time_stamp = s->source_time_stamp; cpi->source_end_time_stamp = s->source_end_time_stamp; #if !(CONFIG_REALTIME_ONLY) // Should we code an alternate reference frame if (cpi->oxcf.error_resilient_mode == 0 && cpi->oxcf.play_alternate && cpi->source_alt_ref_pending && (cpi->frames_till_gf_update_due < cpi->source_buffer_count) && cpi->oxcf.lag_in_frames != 0) { cpi->last_alt_ref_sei = (cpi->source_encode_index + cpi->frames_till_gf_update_due) % cpi->oxcf.lag_in_frames; #if VP8_TEMPORAL_ALT_REF if (cpi->oxcf.arnr_max_frames > 0) { #if 0 // my attempt at a loop that tests the results of strength filter. int start_frame = cpi->last_alt_ref_sei - 3; int i, besti = -1, pastin = cpi->oxcf.arnr_strength; int besterr; if (start_frame < 0) start_frame += cpi->oxcf.lag_in_frames; besterr = vp8_calc_low_ss_err(&cpi->src_buffer[cpi->last_alt_ref_sei].source_buffer, &cpi->src_buffer[start_frame].source_buffer, IF_RTCD(&cpi->rtcd.variance)); for (i = 0; i < 7; i++) { int thiserr; cpi->oxcf.arnr_strength = i; vp8cx_temp_filter_c(cpi); thiserr = vp8_calc_low_ss_err(&cpi->alt_ref_buffer.source_buffer, &cpi->src_buffer[start_frame].source_buffer, IF_RTCD(&cpi->rtcd.variance)); if (10 * thiserr < besterr * 8) { besterr = thiserr; besti = i; } } if (besti != -1) { cpi->oxcf.arnr_strength = besti; vp8cx_temp_filter_c(cpi); s = &cpi->alt_ref_buffer; // FWG not sure if I need to copy this data for the Alt Ref frame s->source_time_stamp = cpi->src_buffer[cpi->last_alt_ref_sei].source_time_stamp; s->source_end_time_stamp = cpi->src_buffer[cpi->last_alt_ref_sei].source_end_time_stamp; s->source_frame_flags = cpi->src_buffer[cpi->last_alt_ref_sei].source_frame_flags; } else s = &cpi->src_buffer[cpi->last_alt_ref_sei]; #else 
vp8cx_temp_filter_c(cpi); s = &cpi->alt_ref_buffer; // FWG not sure if I need to copy this data for the Alt Ref frame s->source_time_stamp = cpi->src_buffer[cpi->last_alt_ref_sei].source_time_stamp; s->source_end_time_stamp = cpi->src_buffer[cpi->last_alt_ref_sei].source_end_time_stamp; s->source_frame_flags = cpi->src_buffer[cpi->last_alt_ref_sei].source_frame_flags; #endif } else #endif s = &cpi->src_buffer[cpi->last_alt_ref_sei]; cm->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due; cm->refresh_alt_ref_frame = 1; cm->refresh_golden_frame = 0; cm->refresh_last_frame = 0; cm->show_frame = 0; cpi->source_alt_ref_pending = FALSE; // Clear Pending altf Ref flag. cpi->is_src_frame_alt_ref = 0; cpi->is_next_src_alt_ref = 0; } else #endif { cm->show_frame = 1; #if !(CONFIG_REALTIME_ONLY) if (cpi->oxcf.allow_lag) { if (cpi->source_encode_index == cpi->last_alt_ref_sei) { cpi->is_src_frame_alt_ref = 1; cpi->last_alt_ref_sei = -1; } else cpi->is_src_frame_alt_ref = 0; cpi->source_encode_index = (cpi->source_encode_index + 1) % cpi->oxcf.lag_in_frames; if(cpi->source_encode_index == cpi->last_alt_ref_sei) cpi->is_next_src_alt_ref = 1; else cpi->is_next_src_alt_ref = 0; } #endif cpi->source_buffer_count--; } cpi->un_scaled_source = &s->source_buffer; cpi->Source = &s->source_buffer; cpi->source_frame_flags = s->source_frame_flags; *time_stamp = cpi->source_time_stamp; *time_end = cpi->source_end_time_stamp; } else { *size = 0; #if !(CONFIG_REALTIME_ONLY) if (flush && cpi->pass == 1 && !cpi->first_pass_done) { vp8_end_first_pass(cpi); /* get last stats packet */ cpi->first_pass_done = 1; } #endif #if HAVE_ARMV7 #if CONFIG_RUNTIME_CPU_DETECT if (cm->rtcd.flags & HAS_NEON) #endif { vp8_pop_neon(store_reg); } #endif return -1; } *frame_flags = cpi->source_frame_flags; #if CONFIG_PSNR if (cpi->source_time_stamp < cpi->first_time_stamp_ever) cpi->first_time_stamp_ever = cpi->source_time_stamp; #endif // adjust frame rates based on timestamps given if 
(!cm->refresh_alt_ref_frame) { if (cpi->last_time_stamp_seen == 0) { double this_fps = 10000000.000 / (cpi->source_end_time_stamp - cpi->source_time_stamp); vp8_new_frame_rate(cpi, this_fps); } else { long long nanosecs = cpi->source_time_stamp - cpi->last_time_stamp_seen; double this_fps = 10000000.000 / nanosecs; vp8_new_frame_rate(cpi, (7 * cpi->oxcf.frame_rate + this_fps) / 8); } cpi->last_time_stamp_seen = cpi->source_time_stamp; } if (cpi->compressor_speed == 2) { vp8_check_gf_quality(cpi); } if (!cpi) { #if HAVE_ARMV7 #if CONFIG_RUNTIME_CPU_DETECT if (cm->rtcd.flags & HAS_NEON) #endif { vp8_pop_neon(store_reg); } #endif return 0; } if (cpi->compressor_speed == 2) { vpx_usec_timer_start(&tsctimer); vpx_usec_timer_start(&ticktimer); } // start with a 0 size frame *size = 0; // Clear down mmx registers vp8_clear_system_state(); //__asm emms; cm->frame_type = INTER_FRAME; cm->frame_flags = *frame_flags; #if 0 if (cm->refresh_alt_ref_frame) { //cm->refresh_golden_frame = 1; cm->refresh_golden_frame = 0; cm->refresh_last_frame = 0; } else { cm->refresh_golden_frame = 0; cm->refresh_last_frame = 1; } #endif #if !(CONFIG_REALTIME_ONLY) if (cpi->pass == 1) { Pass1Encode(cpi, size, dest, frame_flags); } else if (cpi->pass == 2) { Pass2Encode(cpi, size, dest, frame_flags); } else #endif encode_frame_to_data_rate(cpi, size, dest, frame_flags); if (cpi->compressor_speed == 2) { unsigned int duration, duration2; vpx_usec_timer_mark(&tsctimer); vpx_usec_timer_mark(&ticktimer); duration = vpx_usec_timer_elapsed(&ticktimer); duration2 = (unsigned int)((double)duration / 2); if (cm->frame_type != KEY_FRAME) { if (cpi->avg_encode_time == 0) cpi->avg_encode_time = duration; else cpi->avg_encode_time = (7 * cpi->avg_encode_time + duration) >> 3; } if (duration2) { //if(*frame_flags!=1) { if (cpi->avg_pick_mode_time == 0) cpi->avg_pick_mode_time = duration2; else cpi->avg_pick_mode_time = (7 * cpi->avg_pick_mode_time + duration2) >> 3; } } } if (cm->refresh_entropy_probs == 0) { 
vpx_memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc)); } // if its a dropped frame honor the requests on subsequent frames if (*size > 0) { // return to normal state cm->refresh_entropy_probs = 1; cm->refresh_alt_ref_frame = 0; cm->refresh_golden_frame = 0; cm->refresh_last_frame = 1; cm->frame_type = INTER_FRAME; } cpi->ready_for_new_frame = 1; vpx_usec_timer_mark(&cmptimer); cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer); if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame) generate_psnr_packet(cpi); #if CONFIG_PSNR if (cpi->pass != 1) { cpi->bytes += *size; if (cm->show_frame) { cpi->count ++; if (cpi->b_calculate_psnr) { double y, u, v; double sq_error; double frame_psnr = vp8_calc_psnr(cpi->Source, cm->frame_to_show, &y, &u, &v, &sq_error); cpi->total_y += y; cpi->total_u += u; cpi->total_v += v; cpi->total_sq_error += sq_error; cpi->total += frame_psnr; { double y2, u2, v2, frame_psnr2, frame_ssim2 = 0; double weight = 0; vp8_deblock(cm->frame_to_show, &cm->post_proc_buffer, cm->filter_level * 10 / 6, 1, 0, IF_RTCD(&cm->rtcd.postproc)); vp8_clear_system_state(); frame_psnr2 = vp8_calc_psnr(cpi->Source, &cm->post_proc_buffer, &y2, &u2, &v2, &sq_error); frame_ssim2 = vp8_calc_ssim(cpi->Source, &cm->post_proc_buffer, 1, &weight); cpi->summed_quality += frame_ssim2 * weight; cpi->summed_weights += weight; cpi->totalp_y += y2; cpi->totalp_u += u2; cpi->totalp_v += v2; cpi->totalp += frame_psnr2; cpi->total_sq_error2 += sq_error; } } if (cpi->b_calculate_ssimg) { double y, u, v, frame_all; frame_all = vp8_calc_ssimg(cpi->Source, cm->frame_to_show, &y, &u, &v); cpi->total_ssimg_y += y; cpi->total_ssimg_u += u; cpi->total_ssimg_v += v; cpi->total_ssimg_all += frame_all; } } } #if 0 if (cpi->common.frame_type != 0 && cpi->common.base_qindex == cpi->oxcf.worst_allowed_q) { skiptruecount += cpi->skip_true_count; skipfalsecount += cpi->skip_false_count; } #endif #if 0 if (cpi->pass != 1) { FILE *f = fopen("skip.stt", "a"); fprintf(f, "frame:%4d flags:%4x 
Q:%4d P:%4d Size:%5d\n", cpi->common.current_video_frame, *frame_flags, cpi->common.base_qindex, cpi->prob_skip_false, *size);

        if (cpi->is_src_frame_alt_ref == 1)
            fprintf(f, "skipcount: %4d framesize: %d\n", cpi->skip_true_count , *size);

        fclose(f);
    }
#endif
#endif

#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
    if (cm->rtcd.flags & HAS_NEON)
#endif
    {
        // Restore NEON callee-saved registers saved on entry.
        vp8_pop_neon(store_reg);
    }
#endif

    return 0;
}

// Hand the most recent displayable frame back to the application,
// post-processed (deblock/denoise) when CONFIG_POSTPROC is enabled.
// Returns 0 on success, -1 when no frame is available or the pending
// frame is an alt-ref (alt-refs are never displayed).
int vp8_get_preview_raw_frame(VP8_PTR comp, YV12_BUFFER_CONFIG *dest, int deblock_level, int noise_level, int flags)
{
    VP8_COMP *cpi = (VP8_COMP *) comp;

    if (cpi->common.refresh_alt_ref_frame)
        return -1;
    else
    {
        int ret;
#if CONFIG_POSTPROC
        ret = vp8_post_proc_frame(&cpi->common, dest, deblock_level, noise_level, flags);
#else

        if (cpi->common.frame_to_show)
        {
            // Shallow copy of the frame descriptor: the caller sees the
            // encoder-owned pixel buffers, not a private copy.
            *dest = *cpi->common.frame_to_show;
            dest->y_width = cpi->common.Width;
            dest->y_height = cpi->common.Height;
            // NOTE(review): only uv_height is overwritten here; uv_width
            // keeps whatever value the copied descriptor held.  Confirm
            // this is intentional for internally scaled frames.
            dest->uv_height = cpi->common.Height / 2;
            ret = 0;
        }
        else
        {
            ret = -1;
        }

#endif //!CONFIG_POSTPROC
        vp8_clear_system_state();
        return ret;
    }
}

// Install a per-macroblock region-of-interest map: segment ids in 'map'
// (rows x cols macroblocks) with per-segment quantizer deltas, loop-filter
// deltas and encode-breakout thresholds.  A NULL map disables segmentation.
// Returns 0 on success, -1 if map dimensions do not match the frame.
// Body continues on the following lines.
int vp8_set_roimap(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols, int delta_q[4], int delta_lf[4], unsigned int threshold[4])
{
    VP8_COMP *cpi = (VP8_COMP *) comp;
    signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];

    if (cpi->common.mb_rows != rows || cpi->common.mb_cols != cols)
        return -1;

    if (!map)
    {
        disable_segmentation((VP8_PTR)cpi);
        return 0;
    }

    // Set the segmentation Map
    set_segmentation_map((VP8_PTR)cpi, map);

    // Activate segmentation.
// --- vp8_set_roimap() continued ------------------------------------------
enable_segmentation((VP8_PTR)cpi);

    // Set up the quant segment data
    feature_data[MB_LVL_ALT_Q][0] = delta_q[0];
    feature_data[MB_LVL_ALT_Q][1] = delta_q[1];
    feature_data[MB_LVL_ALT_Q][2] = delta_q[2];
    feature_data[MB_LVL_ALT_Q][3] = delta_q[3];

    // Set up the loop filter segment data
    feature_data[MB_LVL_ALT_LF][0] = delta_lf[0];
    feature_data[MB_LVL_ALT_LF][1] = delta_lf[1];
    feature_data[MB_LVL_ALT_LF][2] = delta_lf[2];
    feature_data[MB_LVL_ALT_LF][3] = delta_lf[3];

    // Per-segment encode-breakout (early-skip) thresholds.
    cpi->segment_encode_breakout[0] = threshold[0];
    cpi->segment_encode_breakout[1] = threshold[1];
    cpi->segment_encode_breakout[2] = threshold[2];
    cpi->segment_encode_breakout[3] = threshold[3];

    // Initialise the feature data structure
    // SEGMENT_DELTADATA    0, SEGMENT_ABSDATA      1
    set_segment_data((VP8_PTR)cpi, &feature_data[0][0], SEGMENT_DELTADATA);

    return 0;
}

// Supply an active-macroblock map (rows x cols entries); NULL disables the
// map.  Returns 0 on success, -1 on a dimension mismatch.
int vp8_set_active_map(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols)
{
    VP8_COMP *cpi = (VP8_COMP *) comp;

    if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols)
    {
        if (map)
        {
            vpx_memcpy(cpi->active_map, map, rows * cols);
            cpi->active_map_enabled = 1;
        }
        else
            cpi->active_map_enabled = 0;

        return 0;
    }
    else
    {
        //cpi->active_map_enabled = 0;
        return -1 ;
    }
}

// Set the internal down-scaling modes.  Both modes must lie in
// [NORMAL, ONETWO]; returns 0 on success, -1 on an out-of-range mode.
// NOTE(review): horiz_scale may already have been updated when the
// vert_mode check fails -- the two assignments are not transactional.
int vp8_set_internal_size(VP8_PTR comp, VPX_SCALING horiz_mode, VPX_SCALING vert_mode)
{
    VP8_COMP *cpi = (VP8_COMP *) comp;

    if (horiz_mode >= NORMAL && horiz_mode <= ONETWO)
        cpi->common.horiz_scale = horiz_mode;
    else
        return -1;

    if (vert_mode >= NORMAL && vert_mode <= ONETWO)
        cpi->common.vert_scale  = vert_mode;
    else
        return -1;

    return 0;
}

// Sum of squared error over the Y plane, computed per 16x16 block via the
// runtime-dispatched mse16x16 (call completed on the following line).
int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd)
{
    int i, j;
    int Total = 0;

    unsigned char *src = source->y_buffer;
    unsigned char *dst = dest->y_buffer;
    (void)rtcd;

    // Loop through the Y plane raw and reconstruction data summing (square differences)
    for (i = 0; i < source->y_height; i += 16)
    {
        for (j = 0; j < source->y_width; j += 16)
        {
            unsigned int sse;
            Total += VARIANCE_INVOKE(rtcd,
mse16x16)(src + j, source->y_stride, dst + j, dest->y_stride, &sse);
        }

        src += 16 * source->y_stride;
        dst += 16 * dest->y_stride;
    }

    return Total;
}

// Variant of vp8_calc_ss_err that accumulates only "low" per-block errors:
// 16x16 blocks whose SSE is at or above the threshold are ignored, so the
// metric disregards regions that changed heavily.
int vp8_calc_low_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd)
{
    int i, j;
    int Total = 0;

    unsigned char *src = source->y_buffer;
    unsigned char *dst = dest->y_buffer;
    (void)rtcd;

    // Loop through the Y plane raw and reconstruction data summing (square differences)
    for (i = 0; i < source->y_height; i += 16)
    {
        for (j = 0; j < source->y_width; j += 16)
        {
            unsigned int sse;
            VARIANCE_INVOKE(rtcd, mse16x16)(src + j, source->y_stride, dst + j, dest->y_stride, &sse);

            // NOTE(review): the threshold 8096 looks like it may have been
            // intended as 8192 (2^13); confirm before changing, as altering
            // it changes the metric.
            if (sse < 8096)
                Total += sse;
        }

        src += 16 * source->y_stride;
        dst += 16 * dest->y_stride;
    }

    return Total;
}

// Accessor: current encoder speed (cpu-used) setting.
int vp8_get_speed(VP8_PTR c)
{
    VP8_COMP   *cpi = (VP8_COMP *) c;

    return cpi->Speed;
}

// Accessor: base quantizer index of the most recently coded frame.
int vp8_get_quantizer(VP8_PTR c)
{
    VP8_COMP   *cpi = (VP8_COMP *) c;

    return cpi->common.base_qindex;
}
GB_binop__minus_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__minus_fp64) // A.*B function (eWiseMult): GB (_AemultB_08__minus_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__minus_fp64) // A.*B function (eWiseMult): GB (_AemultB_04__minus_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_fp64) // A*D function (colscale): GB (_AxD__minus_fp64) // D*A function (rowscale): GB (_DxB__minus_fp64) // C+=B function (dense accum): GB (_Cdense_accumB__minus_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__minus_fp64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_fp64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_fp64) // C=scalar+B GB (_bind1st__minus_fp64) // C=scalar+B' GB (_bind1st_tran__minus_fp64) // C=A+scalar GB (_bind2nd__minus_fp64) // C=A'+scalar GB (_bind2nd_tran__minus_fp64) // C type: double // A type: double // A pattern? 0 // B type: double // B pattern? 
0 // BinaryOp: cij = (aij - bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x - y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_FP64 || GxB_NO_MINUS_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__minus_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__minus_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__minus_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__minus_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__minus_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else 
double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__minus_fp64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__minus_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; double alpha_scalar ; double beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((double *) alpha_scalar_in)) ; beta_scalar = (*((double *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__minus_fp64) ( GrB_Matrix C, const int C_sparsity, const 
int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__minus_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__minus_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__minus_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__minus_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for 
(p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; double bij = GBX (Bx, p, false) ; Cx [p] = (x - bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__minus_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = GBX (Ax, p, false) ; Cx [p] = (aij - y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x - aij) ; \ } GrB_Info GB (_bind1st_tran__minus_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - y) ; \ } GrB_Info GB (_bind2nd_tran__minus_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
pcpaes_cfbdecrypt.c
/******************************************************************************* * Copyright 2013-2018 Intel Corporation * All Rights Reserved. * * If this software was obtained under the Intel Simplified Software License, * the following terms apply: * * The source code, information and material ("Material") contained herein is * owned by Intel Corporation or its suppliers or licensors, and title to such * Material remains with Intel Corporation or its suppliers or licensors. The * Material contains proprietary information of Intel or its suppliers and * licensors. The Material is protected by worldwide copyright laws and treaty * provisions. No part of the Material may be used, copied, reproduced, * modified, published, uploaded, posted, transmitted, distributed or disclosed * in any way without Intel's prior express written permission. No license under * any patent, copyright or other intellectual property rights in the Material * is granted to or conferred upon you, either expressly, by implication, * inducement, estoppel or otherwise. Any license under such intellectual * property rights must be express and approved by Intel in writing. * * Unless otherwise agreed by Intel in writing, you may not remove or alter this * notice or any other notice embedded in Materials by Intel or Intel's * suppliers or licensors in any way. * * * If this software was obtained under the Apache License, Version 2.0 (the * "License"), the following terms apply: * * You may not use this file except in compliance with the License. You may * obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 * * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * limitations under the License. 
*******************************************************************************/ /* // // Purpose: // Cryptography Primitive. // AES encryption/decryption (CFB mode) // // Contents: // ippsAESDecryptCFB() // */ #include "owndefs.h" #include "owncp.h" #include "pcpaesm.h" #include "pcptool.h" #if defined(_OPENMP) # include "omp.h" #endif #if (_ALG_AES_SAFE_==_ALG_AES_SAFE_COMPOSITE_GF_) # pragma message("_ALG_AES_SAFE_COMPOSITE_GF_ enabled") #elif (_ALG_AES_SAFE_==_ALG_AES_SAFE_COMPACT_SBOX_) # pragma message("_ALG_AES_SAFE_COMPACT_SBOX_ enabled") # include "pcprijtables.h" #else # pragma message("_ALG_AES_SAFE_ disabled") #endif /*F* // Name: ippsAESDecryptCFB // // Purpose: AES-CFB decryption. // // Returns: Reason: // ippStsNullPtrErr pCtx == NULL // pSrc == NULL // pDst == NULL // pIV == NULL // ippStsContextMatchErr !VALID_AES_ID() // ippStsLengthErr len <1 // ippStsCFBSizeErr (1>cfbBlkSize || cfbBlkSize>MBS_RIJ128) // ippStsUnderRunErr 0!=(dataLen%cfbBlkSize) // ippStsNoErr no errors // // Parameters: // pSrc pointer to the source data buffer // pDst pointer to the target data buffer // len output buffer length (in bytes) // cfbBlkSize CFB block size (in bytes) // pCtx pointer to the AES context // pIV pointer to the initialization vector // *F*/ static void cpDecryptAES_cfb(const Ipp8u* pIV, const Ipp8u* pSrc, Ipp8u* pDst, int nBlocks, int cfbBlkSize, const IppsAESSpec* pCtx) { #if (_IPP>=_IPP_P8) || (_IPP32E>=_IPP32E_Y8) /* use pipelined version is possible */ if(AES_NI_ENABLED==RIJ_AESNI(pCtx)) { if(cfbBlkSize==MBS_RIJ128) DecryptCFB128_RIJ128pipe_AES_NI(pSrc, pDst, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), nBlocks*cfbBlkSize, pIV); else if(0==(cfbBlkSize&3)) DecryptCFB32_RIJ128pipe_AES_NI(pSrc, pDst, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), nBlocks, cfbBlkSize, pIV); else DecryptCFB_RIJ128pipe_AES_NI(pSrc, pDst, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), nBlocks, cfbBlkSize, pIV); } else #endif { Ipp32u tmpInp[2*NB(128)]; Ipp32u tmpOut[ NB(128)]; /* setup encoder method */ RijnCipher 
encoder = RIJ_ENCODER(pCtx); /* read IV */ CopyBlock16(pIV, tmpInp); /* decrypt data block-by-block of cfbLen each */ while(nBlocks) { /* decryption */ //encoder(tmpInp, tmpOut, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), (const Ipp32u (*)[256])RIJ_ENC_SBOX(pCtx)); #if (_ALG_AES_SAFE_==_ALG_AES_SAFE_COMPACT_SBOX_) encoder((Ipp8u*)tmpInp, (Ipp8u*)tmpOut, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), RijEncSbox/*NULL*/); #else encoder((Ipp8u*)tmpInp, (Ipp8u*)tmpOut, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), NULL); #endif /* store output and put feedback into the input buffer (tmpInp) */ if( cfbBlkSize==MBS_RIJ128 && pSrc!=pDst) { ((Ipp32u*)pDst)[0] = tmpOut[0]^((Ipp32u*)pSrc)[0]; ((Ipp32u*)pDst)[1] = tmpOut[1]^((Ipp32u*)pSrc)[1]; ((Ipp32u*)pDst)[2] = tmpOut[2]^((Ipp32u*)pSrc)[2]; ((Ipp32u*)pDst)[3] = tmpOut[3]^((Ipp32u*)pSrc)[3]; tmpInp[0] = ((Ipp32u*)pSrc)[0]; tmpInp[1] = ((Ipp32u*)pSrc)[1]; tmpInp[2] = ((Ipp32u*)pSrc)[2]; tmpInp[3] = ((Ipp32u*)pSrc)[3]; } else { int n; for(n=0; n<cfbBlkSize; n++) { ((Ipp8u*)tmpInp)[MBS_RIJ128+n] = pSrc[n]; pDst[n] = (Ipp8u)( ((Ipp8u*)tmpOut)[n] ^ pSrc[n] ); } /* shift input buffer (tmpInp) for the next CFB operation */ CopyBlock16((Ipp8u*)tmpInp+cfbBlkSize, tmpInp); } pSrc += cfbBlkSize; pDst += cfbBlkSize; nBlocks--; } } } IPPFUN(IppStatus, ippsAESDecryptCFB,(const Ipp8u* pSrc, Ipp8u* pDst, int len, int cfbBlkSize, const IppsAESSpec* pCtx, const Ipp8u* pIV)) { /* test context */ IPP_BAD_PTR1_RET(pCtx); /* use aligned AES context */ pCtx = (IppsAESSpec*)( IPP_ALIGNED_PTR(pCtx, AES_ALIGNMENT) ); /* test the context ID */ IPP_BADARG_RET(!VALID_AES_ID(pCtx), ippStsContextMatchErr); /* test source, target buffers and initialization pointers */ IPP_BAD_PTR3_RET(pSrc, pIV, pDst); /* test stream length */ IPP_BADARG_RET((len<1), ippStsLengthErr); /* test CFB value */ IPP_BADARG_RET(((1>cfbBlkSize) || (MBS_RIJ128<cfbBlkSize)), ippStsCFBSizeErr); /* test stream integrity */ IPP_BADARG_RET((len%cfbBlkSize), ippStsUnderRunErr); /* do encryption */ { int nBlocks = len / cfbBlkSize; 
#if !defined(_OPENMP) cpDecryptAES_cfb(pIV, pSrc, pDst, nBlocks, cfbBlkSize, pCtx); #else int blk_per_thread = AES_NI_ENABLED==RIJ_AESNI(pCtx)? AESNI128_MIN_BLK_PER_THREAD : RIJ128_MIN_BLK_PER_THREAD; int nThreads = IPP_MIN(IPPCP_GET_NUM_THREADS(), IPP_MAX(nBlocks/blk_per_thread, 1)); if(1==nThreads) cpDecryptAES_cfb(pIV, pSrc, pDst, nBlocks, cfbBlkSize, pCtx); else { int blksThreadReg; int blksThreadTail; int srcBlkSize; int ivBlkSize; Ipp8u locIV[MBS_RIJ128*DEFAULT_CPU_NUM]; #if defined (__INTEL_COMPILER) Ipp8u* pLocIV = nThreads>DEFAULT_CPU_NUM? kmp_malloc(nThreads*MBS_RIJ128) : locIV; #else Ipp8u* pLocIV = nThreads>DEFAULT_CPU_NUM ? malloc(nThreads*MBS_RIJ128) : locIV; #endif if(pLocIV) { #pragma omp parallel IPPCP_OMP_LIMIT_MAX_NUM_THREADS(nThreads) { #pragma omp master { int nt; nThreads = omp_get_num_threads(); blksThreadReg = nBlocks / nThreads; blksThreadTail = blksThreadReg + nBlocks % nThreads; srcBlkSize = blksThreadReg*cfbBlkSize; ivBlkSize = IPP_MIN(MBS_RIJ128,srcBlkSize); CopyBlock16(pIV, pLocIV+0); for(nt=1; nt<nThreads; nt++) CopyBlock(pSrc+nt*srcBlkSize-ivBlkSize, pLocIV+MBS_RIJ128+(nt-1)*ivBlkSize, ivBlkSize); } #pragma omp barrier { int id = omp_get_thread_num(); Ipp8u* pThreadIV = pLocIV + id*ivBlkSize; Ipp8u* pThreadSrc = (Ipp8u*)pSrc + id*srcBlkSize; Ipp8u* pThreadDst = (Ipp8u*)pDst + id*srcBlkSize; int blkThread = (id==(nThreads-1))? blksThreadTail : blksThreadReg; cpDecryptAES_cfb(pThreadIV, pThreadSrc, pThreadDst, blkThread, cfbBlkSize, pCtx); } } if(pLocIV != locIV) #if defined (__INTEL_COMPILER) kmp_free(pLocIV); #else free(pLocIV); #endif } else return ippStsMemAllocErr; } #endif return ippStsNoErr; } }
SybasePROP_fmt_plug.c
/* SybasePROP cracker. Hacked together during November of 2013 by Dhiru Kholia
 * <dhiru [at] openwall.com>.
 *
 * This software is Copyright (c) 2013, Dhiru Kholia <dhiru [at] openwall.com>,
 * Frank Benhamou, Gregory Terrien and Marcel Major and it is hereby released
 * to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * All credits for reversing this algorithm go to Marcel Major, Frank Benhamou
 * and Gregory Terrien. Dhiru Kholia just glued together the bits (as usual!).
 *
 * [1] http://www.nes.fr/securitylab/?p=1128 (in French!)
 *
 * [2] https://hacktivity.com/hu/letoltesek/archivum/57/
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_sybaseprop;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sybaseprop);
#else

#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "syb-prop_repro.h"

#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
/* Multiplier applied to max_keys_per_crypt per OpenMP thread (see init()). */
#define OMP_SCALE 16
#endif
static int omp_t = 1;
#endif
#include "memdbg.h"

#define BLOCK_SIZE 8

#define FORMAT_LABEL "Sybase-PROP"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "salted FEAL-8 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 64
/* "0x" prefix + 56 hex digits */
#define CIPHERTEXT_LENGTH (6 + 56)
#define PREFIX_VALUE "0x"
#define PREFIX_LENGTH 2
#define BINARY_SIZE 56 / 2
#define BINARY_ALIGN 4
#define SALT_SIZE 1 // see the definition of generate_hash, note "unsigned char seed" argument
#define SALT_SIZE_HEX 2
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 128

/* Self-test vectors: full "0x..." hash string paired with its plaintext. */
static struct fmt_tests SybasePROP_tests[] = {
	{"0x2905aeb3d00e3b80fb0695cb34c9fa9080f84ae1824b24cc51a3849dcb06", "test11"},
	{"0x3f05fc3d526946d9936c63dd798c5fa1b980747b1d81d0b9b2e8197d2aca", "test12"},
	{NULL}
};

/* One-byte salt currently being tested (set by set_salt()). */
static unsigned char saved_salt;
/* Candidate plaintexts, one NUL-terminated buffer per key slot. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* Hash output per key slot, word-aligned for the get_hash_*() accessors. */
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];

/* Allocate per-key buffers; with OpenMP, scale the key batch size by the
 * thread count (and OMP_SCALE) before allocating. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	if (omp_t > 1) {
		self->params.min_keys_per_crypt *= omp_t;
		omp_t *= OMP_SCALE;
		self->params.max_keys_per_crypt *= omp_t;
	}
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
}

/* Release the buffers allocated by init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/* Accept only strings of the exact form "0x" + 60 hex digits. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p = ciphertext + PREFIX_LENGTH;

	if (strncmp(ciphertext, PREFIX_VALUE, PREFIX_LENGTH))
		return 0;
	if (hexlenl(p) != CIPHERTEXT_LENGTH-PREFIX_LENGTH)
		return 0;
	return 1;
}

/* Decode the hash portion of the ciphertext into raw bytes.
 * Layout skipped over: "0x" prefix, 2 hex chars of salt, then 2 more
 * hex chars (observed to always be "05") before the BINARY_SIZE bytes. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	p = ciphertext + PREFIX_LENGTH + SALT_SIZE_HEX + 2; // last 2 bytes always seem to be "05"
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

/* Extract the one-byte salt (first hex pair after the "0x" prefix).
 * Returns a pointer to a static byte, per the usual JtR get_salt contract. */
static void *get_salt(char *ciphertext)
{
	char *p = ciphertext + PREFIX_LENGTH;
	static unsigned char salt;

	salt = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
	return (void*)&salt;
}

/* Install the salt produced by get_salt() for the next crypt_all() batch. */
static void set_salt(void *salt)
{
	saved_salt = ((unsigned char*)salt)[0];
}

/* Store a candidate plaintext, truncated to PLAINTEXT_LENGTH. */
static void set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

/* Return the stored candidate plaintext for a slot. */
static char *get_key(int index)
{
	return saved_key[index];
}

/* Hash every queued candidate with the current salt (OpenMP-parallel:
 * each index writes only its own crypt_out slot). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		generate_hash((unsigned char*)saved_key[index], saved_salt,
				(unsigned char*)crypt_out[index]);
	}
	return count;
}

/* Fast filter: compare only the first machine word (ARCH_SIZE bytes) of
 * each computed hash against the target; cmp_one() does the full check. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

/* Full BINARY_SIZE comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* The stored binary is the complete hash, so cmp_one() is already exact. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Hash-table bucket accessors over the first output word. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

/* Format descriptor; member order follows struct fmt_main exactly. */
struct fmt_main fmt_sybaseprop = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		SybasePROP_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
jacobi-block-task.c
# include "poisson.h"

/*
 * Jacobi SWEEP — OpenMP task/taskwait flavour.
 *
 * Runs (itnew - itold) Jacobi iterations over the nx-by-ny grid, tiling the
 * domain into block_size x block_size blocks.  Every iteration has two
 * phases, each spawning one task per block: first the current estimate is
 * copied (copy_block), then the next estimate is computed
 * (compute_estimate).  A taskwait after each phase keeps the phases from
 * overlapping.  Passing block_size == 0 selects a single block spanning the
 * whole x extent.
 */
void sweep (int nx, int ny, double dx, double dy, double *f_,
        int itold, int itnew, double *u_, double *unew_, int block_size)
{
    int iter;
    int bx, by;

    if (block_size == 0)
        block_size = nx;

    int nblk_x = nx / block_size;
    int nblk_y = ny / block_size;

    #pragma omp parallel \
        shared(u_, unew_, f_, nblk_x, nblk_y, nx, ny, dx, dy, itold, itnew, block_size) \
        private(iter, bx, by)
    #pragma omp single
    {
        for (iter = itold + 1; iter <= itnew; iter++)
        {
            /* Phase 1: save the current estimate, one task per block. */
            for (bx = 0; bx < nblk_x; bx++)
                for (by = 0; by < nblk_y; by++)
                {
                    #pragma omp task shared(u_, unew_, nx, ny, block_size) firstprivate(bx, by)
                    copy_block(nx, ny, bx, by, u_, unew_, block_size);
                }

            #pragma omp taskwait

            /* Phase 2: compute the new estimate, one task per block. */
            for (bx = 0; bx < nblk_x; bx++)
                for (by = 0; by < nblk_y; by++)
                {
                    #pragma omp task default(none) shared(u_, unew_, f_, dx, dy, nx, ny, block_size) firstprivate(bx, by)
                    compute_estimate(bx, by, u_, unew_, f_, dx, dy, nx, ny, block_size);
                }

            #pragma omp taskwait
        }
    }
}
psr_profile.c
/***************************************************************************** * PSRGEOM * Sam McSweeney, 2018 * * This program attempts to simulate the profile as observed from a pulsar * with given angles α and ζ, period P, and frequency range f1 to f2. * * For each sampled field line, the line is followed outward in incremental * steps. At each step, the average beam pattern of emitting particles is * calculated, with the gamma factors of the particles drawn from a pre- * determined distribution. * * The final output is a two-column file: * 1) the rotation phase in degrees * 2) the profile power (in arbitrary units) * ****************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> #include <time.h> //#include <omp.h> #include "psrgeom.h" struct opts { double al_deg; // alpha angle in deg double ze_deg; // zeta angle in deg double P_sec; // period, in sec char *outfile; // name of output file (NULL means stdout) double s_start; // starting value of s double s_stop; // stopping value of s double p_start; // starting value of p double p_stop; // stopping value of p double f_start; // starting value of freq (MHz) double f_stop; // stopping value of freq (MHz) int open_only; // only consider open field lines int num_lines; // sample this many lines int nsparks; // number of sparks in carousel int dipole; // use dipole field? 
int nbins; // number of profile phase bins double P4_sec; // the rotation time of the carousel int csl_type; // the type of spark profile (TOPHAT or GAUSSIAN) }; void usage(); void parse_cmd_line( int argc, char *argv[], struct opts *o ); void print_col_headers( FILE *f ); int main( int argc, char *argv[] ) { // Seed the random number generator srand( time( NULL ) ); // Generic counter: int i; // Set up struct for command line options and set default values struct opts o; o.al_deg = NAN; o.P_sec = NAN; o.ze_deg = NAN; o.outfile = NULL; o.s_start = NAN; o.s_stop = NAN; o.p_start = 0.0; o.p_stop = 360.0; o.f_start = NAN; o.f_stop = NAN; o.open_only = 0; o.num_lines = 10000; o.nsparks = 0; o.dipole = 0; // use Deutsch field by default o.nbins = 1024; o.P4_sec = NAN; o.csl_type = GAUSSIAN; parse_cmd_line( argc, argv, &o ); // Set up output file FILE *f; if (o.outfile == NULL) f = stdout; else { f = fopen( o.outfile, "w" ); if (f == NULL) { fprintf( stderr, "error: could not open file %s\n", o.outfile ); exit(EXIT_FAILURE); } } // Set up pulsar pulsar psr; psr_angle *ra = NULL; psr_angle *dec = NULL; psr_angle *al = create_psr_angle_deg( o.al_deg ); psr_angle *ze = create_psr_angle_deg( o.ze_deg ); double P = o.P_sec; double r = 1e4; /* This will be used later as we move outwards from the pulsar surface */ set_pulsar( &psr, ra, dec, P, r, al, ze ); if (o.dipole) psr.field_type = DIPOLE; // Set up the carousel psr_angle s, S; set_psr_angle_deg( &S, (o.s_stop + o.s_start) / 2.0 ); set_psr_angle_deg( &s, (o.s_stop - o.s_start) / 2.0 ); set_pulsar_carousel( &psr, o.nsparks, &s, &S, o.csl_type, o.P4_sec ); // Write the file header print_psrg_header( f, argc, argv ); // Some needed variables double profile[o.nbins]; int bin_count[o.nbins]; int centre_bin = o.nbins/2; // Some default values int rL_norm = 0; // Reset profile to zero for (i = 0; i < o.nbins; i++) { profile[i] = 0.0; bin_count[i] = 0; } // Write the column headers print_col_headers( f ); //#pragma omp parallel 
for /* At the moment, this doesn't seem to help */ for (i = 0; i < o.num_lines; i++) { fprintf( stderr, "\r\r\r\r%3d%%", (int)(100.0*(double)i/(double)(o.num_lines-1)) ); int linetype; // either CLOSED_LINE or OPEN_LINE point foot_pt; point init_pt; // Obtain a random point on the pulsar surface random_spark_footpoint( &foot_pt, NULL, &psr, 0.0 ); // If requested, check that we're on an open field line if (o.open_only) { linetype = get_fieldline_type( &foot_pt, &psr, rL_norm, NULL, NULL, NULL ); if (linetype == CLOSED_LINE) { continue; } } // Now climb up the field line, emitting as we go // Start 1 metre above the surface Bstep( &foot_pt, &psr, 1.0, DIR_OUTWARD, &init_pt, NULL ); set_point_xyz( &init_pt, init_pt.x[0], init_pt.x[1], init_pt.x[2], POINT_SET_ALL ); fieldline_to_profile( &psr, &init_pt, o.f_start*1.0e6, o.f_stop*1.0e6, o.nbins, centre_bin, profile, bin_count ); } fprintf( stderr, "\n" ); // Print out the profile double phase_deg; double bin_width = 360.0 / (double)o.nbins; for (i = 0; i < o.nbins; i++) { // Convert bin number to phase phase_deg = (double)(i - centre_bin) * bin_width; fprintf( f, "%.15e %.15e %d\n", phase_deg, profile[i], bin_count[i] ); } // Clean up destroy_psr_angle( ra ); destroy_psr_angle( dec ); destroy_psr_angle( al ); destroy_psr_angle( ze ); free( o.outfile ); if (o.outfile != NULL) fclose( f ); return 0; } void usage() { printf( "usage: psr_visiblepoints [OPTIONS]\n\n" ); printf( "REQUIRED OPTIONS:\n" ); printf( " -a alpha The angle between the rotation and magetic axes " "in degrees (required)\n" ); printf( " -f f1:f2 The emission frequency, in MHz. " "The range is from f1 to f2.\n" ); printf( " -P period The rotation period of the pulsar, in seconds " "(required)\n" ); printf( " -s s1:s2 The angular distance from the magnetic axis, " "in degrees. 
The range is from s1 to s2.\n" ); printf( " -z zeta The angle between the rotation axis and the line " "of sight in degrees (required)\n" ); printf( " -4 P4 The carousel's rotation period (in sec)\n" ); printf( "\nOTHER OPTIONS:\n" ); printf( " -b nbins The number of bins in the output profile\n" ); printf( " -c type The spark profile type, either GAUSSIAN (default) " "or TOPHAT\n" ); printf( " -d Use a dipole field instead of the default " "Deutsch field\n" ); printf( " -h Display this help and exit\n" ); printf( " -n nlines Sample nlines magnetic field lines " "(default: 10000)\n" ); printf( " -N nsparks The number of sparks in the carousel. If nsparks " "= 0 (default), the footpoints are sampled " "uniformly in the range given by -s. Otherwise, " "the s-range is used to define the spark size.\n" ); printf( " -o outfile The name of the output file to write to. If not " "set, output will be written to stdout.\n" ); printf( " -O Only consider open field lines (default: off)\n" ); printf( " -p p1:p2 The azimuth relative to the magnetic axis, " "in degrees. The range is from p1 to p2. 
Ensure " "p1 < p2 [default = 0:360]\n" ); } void parse_cmd_line( int argc, char *argv[], struct opts *o ) { // Collect the command line arguments int c; while ((c = getopt( argc, argv, "a:b:c:df:hn:N:o:Op:P:s:S:z:4:")) != -1) { switch (c) { case 'a': o->al_deg = atof(optarg); break; case 'b': o->nbins = atoi(optarg); break; case 'c': if (strcmp( optarg, "GAUSSIAN" ) == 0) o->csl_type = GAUSSIAN; else if (strcmp( optarg, "TOPHAT" ) == 0) o->csl_type = TOPHAT; else { fprintf( stderr, "error: -c argument must be either " "GAUSSIAN or TOPHAT\n" ); exit(EXIT_FAILURE); } break; case 'd': o->dipole = 1; break; case 'f': parse_range( optarg, &(o->f_start), &(o->f_stop), NULL ); break; case 'h': usage(); exit(EXIT_SUCCESS); break; case 'n': o->num_lines = atoi(optarg); break; case 'N': o->nsparks = atoi(optarg); break; case 'o': o->outfile = strdup(optarg); break; case 'O': o->open_only = 1; break; case 'p': parse_range( optarg, &(o->p_start), &(o->p_stop), NULL ); break; case 'P': o->P_sec = atof(optarg); break; case 's': parse_range( optarg, &(o->s_start), &(o->s_stop), NULL ); break; case 'z': o->ze_deg = atof(optarg); break; case '4': o->P4_sec = atof(optarg); break; case '?': fprintf( stderr, "error: unknown option character '-%c'\n", optopt ); exit(EXIT_FAILURE); break; default: fprintf( stderr, "error: couldn't parse command line\n" ); exit(EXIT_FAILURE); } } // Check that all the arguments are valid if (isnan(o->al_deg) || isnan(o->P_sec) || isnan(o->ze_deg) || isnan(o->P4_sec)) { fprintf( stderr, "error: -a, -P, -z and -4 options required" "\n" ); usage(); exit(EXIT_FAILURE); } if (isnan(o->s_start) || isnan(o->f_start)) { fprintf( stderr, "error: -f and -s options required\n" ); usage(); exit(EXIT_FAILURE); } if (o->nsparks < 0) { fprintf( stderr, "error: -N (=%d) must be >= 0\n", o->nsparks ); exit(EXIT_FAILURE); } } void print_col_headers( FILE *f ) /* The final output includes: * 1) the rotation phase in degrees * 2) the profile power (in arbitrary units) */ { 
// Print out a line to file handle f fprintf( f, "# phase_deg power\n" ); }
O10BlkRed.c
#include <mpi.h>

#include "grid.h"

/* Per-level activity mask for the whole grid (1 = level has a positive value). */
extern char *restrict levelMask;
/* Per-block, per-level activity mask. */
extern char *restrict * restrict levMsk;
/* Per-block running maximum of the scanned field. */
extern GVAL *restrict vcflMax;
/* Global maximum over all blocks (reduced at the end of O10BlkRed). */
extern GVAL vcflMaxVal;
/* Descriptor of the field being scanned; data_pointer.p3 is indexed as
 * [block][height][cell]. */
extern struct {
	char *name;
	int loc;
	int dim;
	union {
		GVAL *restrict * restrict p2;
		GVAL *restrict * restrict * restrict p3;
	} data_pointer;
} *gv_dvg;

/*
 * Scan this MPI rank's share of grid blocks: wherever the gv_dvg field is
 * positive, mark the block/level masks and track the per-block maximum.
 * Finally fold the per-block maxima into the global vcflMaxVal.
 *
 * min_block/max_block carve out this rank's contiguous chunk of the
 * [0, cBlkCnt) block range, where the chunk size is
 * ceil(cBlkCnt / mpi_world_size) — the repeated subexpression
 * (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size).
 *
 * NOTE(review): inside the omp parallel for, different block_index
 * iterations may store 1 into the same levelMask[height_index] entry
 * concurrently; all writers store the same value, but it is still a data
 * race in the strict sense — confirm this is acceptable for the target
 * toolchain.  The final vcflMaxVal loop reduces over ALL cBlkCnt blocks,
 * which presumably assumes vcflMax has been exchanged/combined across
 * ranks elsewhere — TODO confirm against the callers.
 */
void O10BlkRed(GRID * g)
{
	{
		/* First block owned by this rank (0 unless this rank hosts block 0's chunk). */
		size_t min_block = g->mpi_rank == (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0;
		/* One past the last block owned by this rank: 0 when the rank owns no
		 * part of the range; a possibly short tail chunk on the last owning
		 * rank; a full chunk otherwise. */
		size_t max_block = g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
#pragma omp parallel for
		for (size_t block_index = (min_block); block_index < (max_block); block_index++) {
			for (size_t height_index = (0); height_index < (g->height); height_index++) {
				for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) {
					if (gv_dvg->data_pointer.p3[(block_index)][(height_index)][(cell_index)] > 0.0) {
						/* Mark this block/level (and the global level) as active. */
						levMsk[block_index][height_index] = 1;
						levelMask[height_index] = 1;
						/* Keep the per-block running maximum. */
						vcflMax[block_index] = vcflMax[block_index] > gv_dvg->data_pointer.p3[(block_index)][(height_index)][(cell_index)] ? vcflMax[block_index] : gv_dvg->data_pointer.p3[(block_index)][(height_index)][(cell_index)];
					}
				}
			}
		}
	}
	/* Serial reduction of the per-block maxima into the global maximum. */
	for (int b = 0; b < g->cBlkCnt; b++) {
		vcflMaxVal = vcflMaxVal > vcflMax[b] ? vcflMaxVal : vcflMax[b];
	}
}
gate.h
/**
 * @file    gate.h
 * @author  Nader KHAMMASSI - nader.khammassi@gmail.com
 * @date    02-10-15
 * @brief   gate types, constant gate matrices and common gate declarations
 */
#pragma once

#ifndef QX_GATE_H
#define QX_GATE_H

#include <map>

#include <inttypes.h>

#include <immintrin.h> // avx
#include <emmintrin.h> // sse

#include <algorithm>

#include "qx/core/hash_set.h"
#include "qx/core/linalg.h"
#include "qx/core/register.h"
#include "qx/core/binary_counter.h"
#include "qx/core/kronecker.h"
#include "qx/compat.h"

#include <chrono>

#ifdef USE_OPENMP
#include <omp.h>
#endif

// #ifndef __BUILTIN_LINALG__
// #include <boost/numeric/ublas/matrix.hpp>
// #endif

/* sqrt(2) and 1/sqrt(2) as float literals (used by the Hadamard matrix). */
#define SQRT_2   (1.4142135623730950488016887242096980785696718753769480731766797379f)
#define R_SQRT_2 (0.7071067811865475244008443621048490392848359376884740365883398690f)

/* Single-bit test/set/flip/clear helpers on integer values. */
#define __bit_test(x,pos) ((x) & (1<<(pos)))
#define __bit_set(x,pos) ((x) | (1<<(pos)))
#define __bit_flip(x,pos) ((x) ^ (1<<(pos)))
#define __bit_reset(x,pos) ((x) & ~((1<<(pos))))

#define __AVX__NO
#define __OP_PREFETCH__

//#define SQRT_2 (1.41421356237309504880f)
//#define R_SQRT_2 (0.70710678118654752440f)

/* Round x down to a multiple of s (s must be a power of two). */
#define ROUND_DOWN(x, s) ((x) & ~((s)-1))
#define IS_ODD(x) (x & 1)

namespace qx
{
   /**
    * types definition
    */
   // A computational-basis state, encoded as a bit pattern.
   typedef uint64_t                          basis_state_t;
   // Sparse amplitude map: basis state -> complex amplitude.
   typedef std::map<basis_state_t,complex_t> quantum_state_t;
   // Tag identifying each concrete gate/operation implementation.
   typedef enum __gate_type_t
   {
      __identity_gate__,
      __hadamard_gate__,
      __pauli_x_gate__  ,
      __pauli_y_gate__  ,
      __pauli_z_gate__  ,
      __cnot_gate__     ,
      __toffoli_gate__  ,
      __swap_gate__     ,
      __phase_gate__    ,
      __rx_gate__       ,
      __ry_gate__       ,
      __rz_gate__       ,
      __cphase_gate__   ,
      __t_gate__        ,
      __tdag_gate__     ,
      __sdag_gate__     ,
      __custom_gate__   ,
      __prepx_gate__    ,
      __prepy_gate__    ,
      __prepz_gate__    ,
      __measure_gate__  ,
      __measure_reg_gate__,
      __measure_x_gate__  ,
      __measure_x_reg_gate__,
      __measure_y_gate__  ,
      __measure_y_reg_gate__,
      __ctrl_phase_shift_gate__,
      __parallel_gate__,
      __display__,
      __display_binary__,
      __print_str__,
      __bin_ctrl_gate__,
      __lookup_table__,
      __classical_not_gate__,
      __qft_gate__,
      __prepare_gate__,
      __unitary_gate__
   } gate_type_t;

   /**
    * gates coefficients
    * (row-major matrix entries; 4x4 for two-qubit gates, 2x2 for
    * single-qubit gates)
    */
   QX_ALIGNED(64) const complex_t cnot_c [] = {
      complex_t(1.0), complex_t(0.0), complex_t(0.0), complex_t(0.0),
      complex_t(0.0), complex_t(1.0), complex_t(0.0), complex_t(0.0),
      complex_t(0.0), complex_t(0.0), complex_t(0.0), complex_t(1.0),
      complex_t(0.0), complex_t(0.0), complex_t(1.0), complex_t(0.0)
   };     /* CNOT */

   QX_ALIGNED(64) const complex_t swap_c [] = {
      1.0, 0.0, 0.0, 0.0,
      0.0, 0.0, 1.0, 0.0,
      0.0, 1.0, 0.0, 0.0,
      0.0, 0.0, 0.0, 1.0
   };     /* SWAP */

   QX_ALIGNED(64) const complex_t identity_c [] = {
      complex_t(1.0), complex_t(0.0),
      complex_t(0.0), complex_t(1.0)
   };     /* I */

   QX_ALIGNED(64) const complex_t pauli_x_c [] = {
      complex_t(0.0, 0.0) , complex_t(1.0, 0.0),
      complex_t(1.0, 0.0) , complex_t(0.0, 0.0)
   };     /* X */

   QX_ALIGNED(64) const complex_t pauli_y_c [] = {
      complex_t(0.0, 0.0) , complex_t(0.0,-1.0),
      complex_t(0.0, 1.0) , complex_t(0.0, 0.0)
   };     /* Y */

   QX_ALIGNED(64) const complex_t pauli_z_c [] = {
      complex_t(1.0, 0.0) , complex_t(0.0, 0.0),
      complex_t(0.0, 0.0) , complex_t(-1.0,0.0)
   };     /* Z */

   QX_ALIGNED(64) const complex_t phase_c [] = {
      complex_t(1.0, 0.0) , complex_t(0.0, 0.0),
      complex_t(0.0, 0.0) , complex_t(0.0, 1.0)
   };     /* S */

   QX_ALIGNED(64) const complex_t sdag_gate_c[] = {
      complex_t(1.0, 0.0) , complex_t(0.0, 0.0),
      complex_t(0.0, 0.0) , complex_t(0.0, -1.0)
   };     /* S_dag */

   QX_ALIGNED(64) const complex_t t_gate_c [] = {
      complex_t(1.0, 0.0) , complex_t(0.0, 0.0),
      complex_t(0.0, 0.0) , complex_t(cos(QX_PI/4),sin(QX_PI/4))
   };     /* T */

   QX_ALIGNED(64) const complex_t tdag_gate_c[] = {
      complex_t(1.0, 0.0) , complex_t(0.0, 0.0),
      complex_t(0.0, 0.0) , complex_t(cos(QX_PI/4),-sin(QX_PI/4))
   };     /* T_dag */

   QX_ALIGNED(64) const complex_t hadamard_c [] = {
      R_SQRT_2,  R_SQRT_2,
      R_SQRT_2, -R_SQRT_2
   };     /* H */

   /* Row-major index of element (r,c) in an s-wide matrix. */
   #define __rc(r,c,s) (r*s+c)

   /**
    * \brief common abstract gate interface for
    *  all gates implementation.
*/ class gate { public: virtual int64_t apply(qu_register& qureg) = 0; virtual std::vector<uint64_t> qubits() = 0; virtual std::vector<uint64_t> control_qubits() = 0; virtual std::vector<uint64_t> target_qubits() = 0; virtual gate_type_t type() = 0; virtual std::string micro_code() { return "# unsupported operation : qubit out of range"; } virtual void dump() = 0; virtual ~gate() { }; virtual void set_duration(uint64_t d) { duration = d; } virtual uint64_t get_duration() { return duration; } protected: uint64_t duration; }; /** * \brief rotation in the x-z plane with a given * angle theta (see "Large scale simulation of * error-prone quantum systems" p.39" [Niwa 2002]) */ inline cmatrix_t rotation(double theta) { cmatrix_t r; // (2,2); r(0,0) = complex_t(cos(theta),0); r(0,1) = complex_t(-sin(theta),0); r(1,0) = complex_t(sin(theta),0); r(1,1) = complex_t(cos(theta),0); return r; } /** * \brief phase shift for a given angle phi */ inline cmatrix_t phase(double phi) { cmatrix_t p; // (2,2); p(0,0) = complex_t(1,0); p(0,1) = complex_t(0,0); p(1,0) = complex_t(0,0); p(1,1) = complex_t(cos(phi),sin(phi)); return p; } /** * \brief generate noisy hadamard gate */ cmatrix_t noisy_hadamard(double epsilon1=0, double epsilon2=0) { #ifdef __BUILTIN_LINALG__ return mxm(rotation(QX_PI/4 + epsilon1), phase(QX_PI + epsilon2)); #else cmatrix_t rz = rotation(QX_PI/4 + epsilon1); cmatrix_t p = phase(QX_PI + epsilon2); return mxm(rz,p); #endif } /** * \brief build n x n matrix from an array */ cmatrix_t build_matrix(const complex_t * c, uint64_t n) { // assert(n==2); // TO DO : remove the n parameter cmatrix_t m; // (n,n); for (std::size_t i=0; i<n; i++) for (std::size_t j=0; j<n; j++) m(i,j) = c[i*n+j]; return m; } /** * sqg_apply */ #ifdef QX_COMPACT_GATE_OP inline void sqg_apply(cmatrix_t & cm, uint64_t qubit, qu_register& qureg) { uint64_t n = qureg.size(); matrix_t m(2,row_t(2,0)); m[0][0] = cm(0,0); m[0][1] = cm(0,1); m[1][0] = cm(1,0); m[1][1] = cm(1,1); if (qubit == 0) { 
identity id(1UL << (n-1)); unitary_matrix um(cm.size1(),m); kronecker k(&id, &um); cvector_t r(qureg.get_data()); mulmv(k,qureg.get_data(),r); qureg = r; } else if (qubit == n-1) { identity id(1UL << (n-1)); unitary_matrix um(cm.size1(),m); kronecker k(&um, &id); cvector_t r(qureg.get_data()); mulmv(k,qureg.get_data(),r); qureg = r; } else { identity id1(1UL << (qubit)); identity id2(1UL << (n-qubit-1)); unitary_matrix um(cm.size1(),m); kronecker k(&id2, &um, &id1); cvector_t r(qureg.get_data()); mulmv(k,qureg.get_data(),r); qureg = r; } } /** * u on the kth qubit : * non-null value in each row of the kronocker matrix: * for each row r : * c1 = r || 000100 // 1 at the n-k bit * c2 = r || 000000 */ // #elif QX_SPARSE_MV_MUL #else // QX_SPARSE_MV_MUL uint64_t rw_process(uint64_t is, uint64_t ie, uint64_t s, uint64_t n, uint64_t qubit, const kronecker * m, cvector_t * v, cvector_t * res) { uint64_t k = n-qubit; // println("run : " << is << " .. " << ie); complex_t * pv = v->data(); complex_t * pr = res->data(); size_t nk = n-k; for (uint64_t r=is; r<ie; ++r) { size_t bc = r; size_t c1 = __bit_reset(bc,nk); size_t c2 = __bit_set(bc,nk); // complex_t s; // = 0; pr[r] = pv[c1]*(m->get(r,c1)) + pv[c2]*(m->get(r,c2)); } return 0; } void sparse_mulmv(uint64_t n, uint64_t qubit, const kronecker& m, cvector_t& v, cvector_t& res) { uint64_t k = n-qubit; uint64_t rows = (1UL << n); uint64_t z = 0UL; /*xpu::task rw_t(rw_process,0,0,0,n,qubit,&m,&v,&res); xpu::parallel_for process(z,rows,1,&rw_t); process.run();*/ rw_process(z,rows,0UL,n,qubit,&m,&v,&res); } void __apply_m(std::size_t start, std::size_t end, const std::size_t qubit, complex_t * state, const std::size_t stride0, const std::size_t stride1, const complex_t * matrix) { #if 0 __m128d m00 = matrix[0].xmm; __m128d m01 = matrix[1].xmm; __m128d m10 = matrix[2].xmm; __m128d m11 = matrix[3].xmm; #endif complex_t m00 = matrix[0]; complex_t m01 = matrix[1]; complex_t m10 = matrix[2]; complex_t m11 = matrix[3]; #ifdef 
USE_OPENMP #pragma omp parallel for // shared(m00,m01,m10,m11) #endif for(int64_t offset = start; offset < (int64_t)end; offset += (1UL << (qubit + 1))) for(size_t i = (size_t)offset; i < (size_t)offset + (1UL << qubit); i++) { size_t i0 = i + stride0; size_t i1 = i + stride1; complex_t in0 = state[i0]; complex_t in1 = state[i1]; state[i0] = m00*in0+m01*in1; state[i1] = m10*in0+m11*in1; #if 0 __m128d in0 = state[i0].xmm; __m128d in1 = state[i1].xmm; state[i0].xmm = _mm_add_pd(xpu::_mm_mulc_pd(m00, in0), xpu::_mm_mulc_pd(m10, in1)); state[i1].xmm = _mm_add_pd(xpu::_mm_mulc_pd(m10, in1), xpu::_mm_mulc_pd(m11, in1)); #endif } } #ifdef __SSE__ // #ifdef __FMA__ void __apply_x(std::size_t start, std::size_t end, const std::size_t qubit, complex_t * state, const std::size_t stride0, const std::size_t stride1, const complex_t * matrix) { #ifdef USE_OPENMP #pragma omp parallel for // private(m00,r00,neg) #endif for(int64_t offset = start; offset < (int64_t)end; offset += (1UL << (qubit + 1UL))) for(size_t i = (size_t)offset; i < (size_t)offset + (1UL << qubit); i++) { size_t i0 = i + stride0; size_t i1 = i + stride1; __m128d xin0 = state[i0].xmm; // _mm_load_pd((double*)&(state[i0].xmm)); // __m128d xin1 = state[i1].xmm; // _mm_load_pd((double*)&(state[i1].xmm)); state[i0].xmm = state[i1].xmm; state[i1].xmm = xin0; } } // #else // #error "FMA not available !" // #endif // FMA #else #error "SSE not available !" 
#endif // SSE #ifdef __SSE__ // #ifdef __FMA__ void __apply_h(std::size_t start, std::size_t end, const std::size_t qubit, complex_t * state, const std::size_t stride0, const std::size_t stride1, const complex_t * matrix) { __m128d m00 = matrix[0].xmm; __m128d r00 = _mm_shuffle_pd(m00,m00,3); // 1 cyc __m128d neg = _mm_set1_pd(-0.0f); #ifdef USE_OPENMP #pragma omp parallel for // private(m00,r00,neg) #endif for(int64_t offset = start; offset < (int64_t)end; offset += (1UL << (qubit + 1UL))) for(size_t i = (size_t)offset; i < (size_t)offset + (1UL << qubit); i++) { size_t i0 = i + stride0; size_t i1 = i + stride1; __m128d xin0 = state[i0].xmm; // _mm_load_pd((double*)&(state[i0].xmm)); __m128d xin1 = state[i1].xmm; // _mm_load_pd((double*)&(state[i1].xmm)); __m128d t2; // = _mm_shuffle_pd(m01,m01,3); // 1 cyc __m128d t1 = _mm_mul_pd(xin0,r00); // 5 cyc #ifdef __FMA__ __m128d xi0 = _mm_fmadd_pd (xin1,r00, t1); // x2*t2+t1 // 5 cyc #else __m128d xi0 = _mm_mul_pd(xin1,r00); xi0 = _mm_add_pd(xi0,t1); // x2*t2+t1 // 5 cyc #endif // __FMA__ // t2 = _mm_shuffle_pd(m11,m11,3); // 1 cyc t2 = _mm_xor_pd(r00,neg); // 1 cyc (m11=-m00) #ifdef __FMA__ __m128d xi1 = _mm_fmadd_pd (xin1, t2, t1); // x2*t2+t1 // 5 cyc #else __m128d xi1 = _mm_mul_pd(xin1,t2); xi1 = _mm_add_pd(xi1,t1); // x2*t2+t1 // 5 cyc #endif state[i0].xmm = xi0; // _mm_store_pd((double*)(&state[i0].xmm),xi0); state[i1].xmm = xi1; // _mm_store_pd((double*)(&state[i1].xmm),xi1); } } // #else // #error "FMA not available !" // #endif // FMA #else #error "SSE not available !" #endif // SSE uint64_t rw_process_ui(uint64_t is, uint64_t ie, uint64_t s, uint64_t n, uint64_t qubit, kronecker_ui m, cvector_t * v, cvector_t * res) { uint64_t k = n-qubit; // println("run : " << is << " .. 
" << ie); complex_t * pv = v->data(); complex_t * pr = res->data(); size_t bc, c1, c2; size_t nk = n-k; for (uint64_t r=is; r<ie; ++r) { bc = r; c1 = __bit_reset(bc,nk); c2 = __bit_set(bc,nk); bc++; #ifdef __OP_PREFETCH__ _mm_prefetch((char*)&pv[__bit_reset(bc,nk)],_MM_HINT_T0); _mm_prefetch((char*)&pv[__bit_set(bc,nk)],_MM_HINT_T0); #endif // __OP_PREFETCH__ #ifdef __AVX__ // cxc xpu::_mm_cmul_add_pd(pv[c1], pv[c2], m.get(r,c1), m.get(r,c2),pr[r]); // cxr // pr[r].xmm = complex_t::_mm256_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm); #elif __SSE__ // complex_t s; // = 0; //pr[r] = pv[c1]*(m->get(r,c1)) + pv[c2]*(m->get(r,c2)); // --- cc mul add --- pr[r].xmm = _mm_add_pd((pv[c1]*(m.get(r,c1))).xmm, (pv[c2]*(m.get(r,c2))).xmm); // --- cr mul add --- pr[r].xmm = _mm_add_pd(complex_t::mul_cr(pv[c1].xmm,m.get(r,c1).xmm), complex_t::mul_cr(pv[c2].xmm,m.get(r,c2).xmm)); // --- f. mul add --- // pr[r].xmm = complex_t::_mm_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm); #else pr[r] = (pv[c1]*(m.get(r,c1))) + (pv[c2]*(m.get(r,c2))); #endif } return 0; } void sparse_mulmv(uint64_t n, uint64_t qubit, kronecker_ui m, cvector_t& v, cvector_t& res) { uint64_t k = n-qubit; uint64_t rows = (1UL << n); uint64_t z = 0; #ifdef SEQUENTIAL rw_process_ui(z,rows,1,n,qubit,m,&v,&res); #else /*xpu::task rw_t(rw_process_ui,0,0,0,n,qubit,m,&v,&res); xpu::parallel_for process(z,rows,1,&rw_t); process.run();*/ rw_process_ui(z,rows,0,n,qubit,m,&v,&res); #endif } uint64_t rw_process_iu(uint64_t is, uint64_t ie, uint64_t s, uint64_t n, uint64_t qubit, kronecker_iu m, cvector_t * v, cvector_t * res) { uint64_t k = n-qubit; // println("run : " << is << " .. 
" << ie); complex_t * pv = v->data(); complex_t * pr = res->data(); size_t bc, c1, c2; size_t nk = n-k; for (uint64_t r=is; r<ie; ++r) { bc = r; c1 = __bit_reset(bc,nk); c2 = __bit_set(bc,nk); bc++; #ifdef __OP_PREFETCH__ _mm_prefetch((char*)&pv[__bit_reset(bc,nk)],_MM_HINT_T0); _mm_prefetch((char*)&pv[__bit_set(bc,nk)],_MM_HINT_T0); #endif // __OP_PREFETCH__ #ifdef __AVX__ // cxc xpu::_mm_cmul_add_pd(pv[c1], pv[c2], m.get(r,c1), m.get(r,c2),pr[r]); // cxr // pr[r].xmm = complex_t::_mm256_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm); #elif __SSE__ // complex_t s; // = 0; // pr[r] = pv[c1]*(m->get(r,c1)) + pv[c2]*(m->get(r,c2)); // --- cc mul add --- pr[r].xmm = _mm_add_pd((pv[c1]*(m.get(r,c1))).xmm, (pv[c2]*(m.get(r,c2))).xmm); // --- cr mul add --- pr[r].xmm = _mm_add_pd(complex_t::mul_cr(pv[c1].xmm,m.get(r,c1).xmm), complex_t::mul_cr(pv[c2].xmm,m.get(r,c2).xmm)); // --- f. mul add --- // pr[r].xmm = complex_t::_mm_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm); #else pr[r] = (pv[c1]*(m.get(r,c1))) + (pv[c2]*(m.get(r,c2))); #endif } return 0; } void sparse_mulmv(uint64_t n, uint64_t qubit, kronecker_iu m, cvector_t& v, cvector_t& res) { uint64_t k = n-qubit; uint64_t rows = (1UL << n); uint64_t z = 0; #ifdef SEQUENTIAL rw_process_iu(z,rows,1,n,qubit,m,&v,&res); #else /*xpu::task rw_t(rw_process_iu,0,0,0,n,qubit,m,&v,&res); xpu::parallel_for process(z,rows,1,&rw_t); process.run();*/ rw_process_iu(z,rows,0,n,qubit,m,&v,&res); #endif } // static xpu::core::os::mutex mtx; uint64_t rw_process_iui(uint64_t is, uint64_t ie, uint64_t s, uint64_t n, uint64_t qubit, kronecker_iui m, cvector_t * v, cvector_t * res) { uint64_t k = n-qubit; // println("run : " << is << " .. 
" << ie); complex_t * pv = v->data(); complex_t * pr = res->data(); size_t bc, c1, c2; size_t nk = n-k; for (uint64_t r=is; r<ie; r++) //+=2) { // 1st bc = r; c1 = __bit_reset(bc,nk); c2 = __bit_set(bc,nk); bc++; #ifdef __OP_PREFETCH__ _mm_prefetch((char*)&pv[__bit_reset(bc,nk)],_MM_HINT_T0); _mm_prefetch((char*)&pv[__bit_set(bc,nk)],_MM_HINT_T0); #endif // __OP_PREFETCH__ #ifdef __AVX__ // mtx.lock(); // cxc : xpu::_mm_cmul_add_pd(pv[c1], pv[c2], m.get(r,c1), m.get(r,c2),pr[r]); // cxr // pr[r].xmm = complex_t::_mm256_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm); /* __m256d a; //_mm256_loadu2_m128d((double*)&pv[c1], (double*)&pv[c2]); a = _mm256_insertf128_pd(a,_mm_permute_pd(pv[c1].xmm,1), 0); a = _mm256_insertf128_pd(a,_mm_permute_pd(pv[c2].xmm,1), 1); print("(r="<<r<<") : pr12: "); xpu::dump_m256d(a); // __m256d b = _mm256_set_m128d((m.get(r,c1)).xmm, (m.get(r,c2)).xmm); __m256d b; b = _mm256_insertf128_pd(b,_mm_permute_pd(m.get(r,c1).xmm, 1), 1); print("(r="<<r<<") : c1 : "); xpu::dump_m256d(b); b = _mm256_insertf128_pd(b,_mm_permute_pd(m.get(r,c2).xmm, 1), 0); print("(r="<<r<<") : c2 : "); xpu::dump_m256d(b); __m256d ab = xpu::_mm256_cmul_pd(a,b); print("(r="<<r<<") : mul: "); xpu::dump_m256d(ab); __m256d abr = _mm256_permute2f128_pd(ab, ab, 1); print("(r="<<r<<") : prm: "); xpu::dump_m256d(abr); ab = _mm256_add_pd(ab,abr); print("(r="<<r<<") : add: "); xpu::dump_m256d(ab); pr[r].xmm = _mm256_extractf128_pd(ab,0); print("(r="<<r<<") : res:"); xpu::dump_m128d(pr[r].xmm); mtx.unlock(); */ #elif __SSE__ /* mtx.lock(); print("(r="<<r<<") : pr1: "); xpu::dump_m128d(pv[c1].xmm); print("(r="<<r<<") : pr2: "); xpu::dump_m128d(pv[c2].xmm); print("(r="<<r<<") : c1 : "); xpu::dump_m128d((m.get(r,c1)).xmm); print("(r="<<r<<") : c2 : "); xpu::dump_m128d((m.get(r,c2)).xmm); */ // --- cxc mul --- pr[r].xmm = _mm_add_pd((pv[c1]*(m.get(r,c1))).xmm, (pv[c2]*(m.get(r,c2))).xmm); // --- cxr mul --- pr[r].xmm = 
_mm_add_pd(complex_t::mul_cr(pv[c1].xmm,m.get(r,c1).xmm), complex_t::mul_cr(pv[c2].xmm,m.get(r,c2).xmm)); // --- fus ma --- // pr[r].xmm = complex_t::_mm_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm); // pr[r].xmm = xpu::_mm128_mul_add_pc(pv[c1].xmm, pv[c2].xmm, m.get(r,c1).xmm, m.get(r,c2).xmm); /* print("(r="<<r<<") : res: "); xpu::dump_m128d(pr[r].xmm); mtx.unlock(); */ #else pr[r] = (pv[c1]*(m.get(r,c1))) + (pv[c2]*(m.get(r,c2))); #endif /* // 2nd c1 = __bit_reset(bc,n-k); c2 = __bit_set(bc,n-k); #ifdef __AVX__NO a = _mm256_loadu2_m128d((double*)&pv[c1], (double*)&pv[c2]); // __m256d b = _mm256_set_m128d((m.get(r,c1)).xmm, (m.get(r,c2)).xmm); b = _mm256_insertf128_pd(b,(m.get(bc,c1)).xmm, 1); b = _mm256_insertf128_pd(b,(m.get(bc,c2)).xmm, 0); ab = xpu::_mm256_cmul_pd(a,b); abr = _mm256_permute2f128_pd(ab, ab, 1); ab = _mm256_add_pd(ab,abr); pr[bc].xmm = _mm256_extractf128_pd(ab,0); #elif __SSE__ pr[bc].xmm = _mm_add_pd((pv[c1]*(m.get(bc,c1))).xmm, (pv[c2]*(m.get(bc,c2))).xmm); #else pr[bc] = (pv[c1]*(m.get(bc,c1))) + (pv[c2]*(m.get(bc,c2))); #endif */ } return 0; } void sparse_mulmv(uint64_t n, uint64_t qubit, kronecker_iui m, cvector_t& v, cvector_t& res) { uint64_t k = n-qubit; uint64_t rows = (1UL << n); uint64_t z = 0; #ifdef SEQUENTIAL rw_process_iui(z,rows,1,n,qubit,m,&v,&res); #else /*xpu::task rw_t(rw_process_iui,0,0,0,n,qubit,m,&v,&res); xpu::parallel_for process(z,rows,1,&rw_t); process.run();*/ #endif } inline void sqg_apply(cmatrix_t & cm, uint64_t qubit, qu_register& qureg) { uint64_t n = qureg.size(); complex_t * s = qureg.get_data().data(); // cm.dump(); __apply_m(0, (1UL << n), qubit, s, 0, (1UL << qubit), cm.m); return; } #endif // remove naive tensor computation typedef enum { __x180__, __x90__ , __y180__, __y90__ , __ym90__ } elementary_operation_t; static const char * pulse_lt[][5] = { { " pulse 9,0,0", " pulse 10,0,0", " pulse 11,0,0", " pulse 12,0,0", " pulse 14,0,0" }, { " pulse 0,9,0", " pulse 0,10,0", " pulse 
0,11,0", " pulse 0,12,0", " pulse 0,14,0" }, { " pulse 0,0,9", " pulse 0,0,10", " pulse 0,0,11", " pulse 0,0,12", " pulse 0,0,14" }, }; /** * \brief hadamard gate: * * | 1 1| * 1/sqrt(2) | | * | 1 -1| */ class hadamard : public gate { private: uint64_t qubit; cmatrix_t m; public: hadamard(uint64_t qubit) : qubit(qubit) //,m((complex_t*)hadamard_c) { m = build_matrix(hadamard_c,2); } int64_t apply(qu_register& qureg) { size_t qs = qureg.states(); complex_t * data = qureg.get_data().data(); // sqg_apply(m,qubit,qureg); __apply_h(0, qs, qubit, data, 0, (1UL << qubit), hadamard_c); // __apply_m(0, qs, qubit, data, 0, (1 << qubit), hadamard_c); //__apply_h_old(0, qs, qubit, data, 0, (1 << qubit), hadamard_c); // qureg.set_binary(qubit,__state_unknown__); qureg.set_measurement_prediction(qubit,__state_unknown__); return 0; } std::string micro_code() { /** | wait 5 | y90 q0 --> { pulse 12,0,0 } | wait 5 | x180 q0 --> { pulse 9,0,0 } */ if (qubit > 2) return "# unsupported operation : qubit out of range"; std::stringstream uc; uc << pulse_lt[qubit][__y90__] << "\n"; uc << " wait 4 \n"; uc << pulse_lt[qubit][__x180__] << "\n"; uc << " wait 4 \n"; return uc.str(); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __hadamard_gate__; } void dump() { println(" [-] hadamard(q=" << qubit << ")"); } }; inline void __swap(cvector_t& amp, size_t size, size_t bit, size_t trg, size_t ctrl, size_t offset=0) { // println("bit=" << bit); // println("ctrl=" << ctrl); complex_t * p = amp.data(); size_t incrementer = 1UL << (bit+1); if ((1UL<<bit) == 1) { for (size_t i=__bit_set(0,bit), end=(1UL<<size); i<end; i+=incrementer) { size_t v = i+offset; std::swap(amp[v], amp[__bit_reset(v,trg)]); } } else { for (size_t i=__bit_set(0,bit), end=(1UL<<size); 
i<end; i+=incrementer) { size_t v = i+offset; for (size_t j=0; j<(1UL<<bit); j++) { // v += j; /* #ifdef __SSE__ __m128d x = _mm_load_pd((const double *)&p[v]); __m128d y = _mm_load_pd((const double *)&p[__bit_reset(v,trg)]); _mm_store_pd((double *)&p[__bit_reset(v,trg)],x); _mm_store_pd((double *)&p[v],y); #else */ std::swap(amp[v], amp[__bit_reset(v,trg)]); ++v; // println("swap("<<v<<","<<__bit_reset(v,trg)<<")"); // #endif } } } } inline int cx_worker(uint64_t cs, uint64_t ce, uint64_t s, cvector_t * p_amp, size_t bit1, size_t bit2, size_t trg, size_t ctrl) { cvector_t &amp = * p_amp; // xpu::parallel_for fswp(__bit_set(0,b1), (1 << qn), (1 << (b1+1)), &t); size_t step=(1UL << (bit1+1)); // size_t b = cs; // size_t e = ce; size_t offset = __bit_set(0,bit1); //for (size_t i=__bit_set(0,bit1); i<(1<<size); i += (1 << (bit1+1))) //__swap(amp,bit1,bit2,trg,ctrl,i); // for (size_t i=b; i<e; i++) // __swap(amp,bit1,bit2,trg,ctrl,offset+(i*step)); for (size_t i=cs; i<ce; i++) __swap(amp,bit1,bit2,trg,ctrl,offset+(i*step)); return 0; } /** * \brief controlled-not gate: * * | 1 0 0 0 | * | 0 1 0 0 | * | 0 0 0 1 | * | 0 0 1 1 | */ class cnot : public gate { private: uint64_t control_qubit; uint64_t target_qubit; cmatrix_t m; public: cnot(uint64_t ctrl_q, uint64_t target_q) : control_qubit(ctrl_q), target_qubit(target_q) { // m = build_matrix(cnot_c,4); // stack smaching } // #define CG_HASH_SET //#define CG_MATRIX #ifndef CG_BC #ifndef CG_MATRIX #define CG_BC #endif #endif // CG_BC int64_t apply(qu_register& qreg) { // println("cnot " << control_qubit << "," << target_qubit); #ifdef CG_MATRIX uint64_t sn = qreg.states(); uint64_t qn = qreg.size(); uint64_t cq = control_qubit; uint64_t tq = target_qubit; cmatrix_t i = cidentity_t(sn); perm_t p = perms(qn,cq,tq); // dump_matrix(i); for (perm_t::iterator it = p.begin(); it != p.end(); it++) { i(it->first,it->second) = 1; i(it->second,it->first) = 1; i(it->first, it->first) = 0; i(it->second,it->second) = 0; } // 
dump_matrix(i); qreg = mxv(i, qreg.get_data()); #elif defined(CG_BC) uint64_t sn = qreg.states(); uint64_t qn = qreg.size(); uint64_t cq = control_qubit; uint64_t tq = target_qubit; cvector_t& amp = qreg.get_data(); // perms(qn,cq,tq,amp); // #if 0 size_t b1 = std::max(cq,tq); size_t b2 = std::min(cq,tq); size_t steps = ((1UL << qn)-(__bit_set(0,b1)))/(1UL << (b1+1))+1; /* println("from=" << (__bit_set(0,b1))); println("to=" << (1 << qn)); println("s=" << (1 << (b1+1))); println("steps=" << steps); */ if (qn<17) fast_cx(amp, qn, b1, b2, tq, cq); else { #ifdef USE_OPENMP #pragma omp parallel { #ifndef _MSC_VER #pragma omp for simd #endif for (size_t i=0; i<steps; ++i) cx_worker(i,i+1,1UL,&amp,b1,b2,(size_t)tq,(size_t)cq); } #else xpu::task t(cx_worker,0UL,0UL,0UL,&amp,b1,b2,(size_t)tq,(size_t)cq); xpu::parallel_for fswp(0, steps, 1, &t); fswp.run(); #endif } // #endif #elif defined(CG_HASH_SET) uint64_t j = control_qubit+1; uint64_t k = target_qubit+1; uint64_t k2 = (1UL << (k-1)); uint64_t j2 = (1UL << (j-1)); uint64_t r_size = qreg.states(); xpu::container::hash_set<uint64_t> swap_set; // find swap pairs for (uint64_t t = 0; t < r_size; t++) { if ((t & j2) <= 0) continue; if (swap_set.find(t-k2) == swap_set.end()) swap_set.insert(t); } int64_t t2; cvector_t& amp = qreg.get_data(); complex_t c1(0., 0.), c2(0., 0.); for (xpu::container::hash_set<uint64_t>::iterator t = swap_set.begin(); t != swap_set.end(); ++t) { int64_t _t = *t; t2 = (_t + k2 < r_size) ? 
_t + k2 : _t - k2; c1 = amp(_t); c2 = amp(t2); std::swap(c1, c2); amp(_t) = c1; amp(t2) = c2; } //qreg=amp; #endif // CG_HASH_SET // if (qreg.get_binary(control_qubit) == __state_1__) if (qreg.get_measurement_prediction(control_qubit) == __state_1__) qreg.flip_binary(target_qubit); //else if (qreg.get_binary(control_qubit) == __state_unknown__) else if (qreg.get_measurement_prediction(control_qubit) == __state_unknown__) qreg.set_measurement_prediction(target_qubit,__state_unknown__); // qreg.set_binary(target_qubit,__state_unknown__); return 0; } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(control_qubit); r.push_back(target_qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; r.push_back(control_qubit); return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(target_qubit); return r; } gate_type_t type() { return __cnot_gate__; } void dump() { println(" [-] cnot(ctrl_qubit=" << control_qubit << ", target_qubit=" << target_qubit << ")"); } private: #if 0 void __swap(cvector_t& amp, size_t size, size_t bit, size_t trg, size_t ctrl, size_t offset=0) { // println("bit=" << bit); // println("ctrl=" << ctrl); for (size_t i=__bit_set(0,bit); i<(1UL<<size); i += (1UL << (bit+1))) for (size_t j=0; j<(1<<bit); j++) { size_t v = i+j+offset; std::swap(amp[v], amp[__bit_reset(v,trg)]); // println(" swap(" << std::bitset<16>(v) << "," << std::bitset<16>(__bit_reset(v,trg)) << ")"); } } #endif void fast_cx(cvector_t& amp, size_t size, size_t bit1, size_t bit2, size_t trg, size_t ctrl) { /* println("from=" << (__bit_set(0,bit1))); println("to=" << (1 << size)); println("s=" << (1 << (bit1+1))); */ for (size_t i=__bit_set(0,bit1); i<(1UL<<size); i += (1UL << (bit1+1))) __swap(amp,bit1,bit2,trg,ctrl,i); } }; template<typename T> void swap_if_greater(T& a, T& b) { if (a > b) { T tmp(a); a = b; b = tmp; } } template<typename T> void sort(T& a, T& b, T& c) { swap_if_greater(a, b); 
swap_if_greater(a, c); swap_if_greater(b, c); } /** * \brief toffoli gate: * * | 1 0 0 0 | * | 0 1 0 0 | * | 0 0 0 1 | * | 0 0 1 1 | */ class toffoli : public gate { private: uint64_t control_qubit_1; uint64_t control_qubit_2; uint64_t target_qubit; public: toffoli(uint64_t ctrl_q1, uint64_t ctrl_q2, uint64_t target_q) : control_qubit_1(ctrl_q1), control_qubit_2(ctrl_q2), target_qubit(target_q) { } int64_t apply(qu_register& qreg) { uint64_t sn = qreg.states(); uint64_t qn = qreg.size(); uint64_t cq1 = control_qubit_1; uint64_t cq2 = control_qubit_2; uint64_t tq = target_qubit; cvector_t& amp = qreg.get_data(); //println("\ntoffoli " << cq1 << "," << cq2 << "," << tq); #if 1 size_t c1=cq1; size_t c2=cq2; size_t c3=tq; size_t t=tq; size_t size=qn; sort(c1,c2,c3); #ifdef USE_OPENMP #pragma omp parallel for #endif for (int64_t i=__bit_set(__bit_set(__bit_set(0,c1),c2),c3); i<(int64_t)(1UL<<size); i += (1UL << (c3+1))) for (size_t j=(size_t)i; j<((size_t)i+(1UL<<c3)); j += (1UL << (c2+1))) for (size_t k=j; k<(j+(1UL<<c2)); k+=(1UL << (c1+1))) for (size_t l=k; l<(k+(1UL<<(c1))); l++) { std::swap(amp[__bit_set(l,t)],amp[__bit_reset(l,t)]); // println("swap : " << __bit_set(l,t) << "," << __bit_reset(l,t)); } #else std::vector<uint64_t> done(sn, 0); perm_t p = perms(qn,cq1,cq2,tq); uint64_t p1,p2; for (perm_t::iterator it = p.begin(); it != p.end(); it++) { p1 = it->first; p2 = it->second; if (!(done[p1] || done[p2])) //if (!(done[p1])) { // std::swap(amp(p1),amp(p2)); // ublas std::swap(amp[p1],amp[p2]); //println("swap : " << p1 << "," << p2); done[p1] = 1; done[p2] = 1; } } #endif if ((qreg.get_measurement_prediction(control_qubit_1) == __state_1__) && (qreg.get_measurement_prediction(control_qubit_2) == __state_1__) ) { qreg.flip_binary(target_qubit); } else if ((qreg.get_measurement_prediction(control_qubit_1) == __state_unknown__) || (qreg.get_measurement_prediction(control_qubit_2) == __state_unknown__) ) { 
qreg.set_measurement_prediction(target_qubit,__state_unknown__);   // completes toffoli::apply() begun in the previous chunk
            // qreg.set_binary(target_qubit,__state_unknown__);
         }
         return 0;
      }

      // all qubits touched by the gate: both controls, then the target
      std::vector<uint64_t> qubits()
      {
         std::vector<uint64_t> r;
         r.push_back(control_qubit_1);
         r.push_back(control_qubit_2);
         r.push_back(target_qubit);
         return r;
      }

      std::vector<uint64_t> control_qubits()
      {
         std::vector<uint64_t> r;
         r.push_back(control_qubit_1);
         r.push_back(control_qubit_2);
         return r;
      }

      std::vector<uint64_t> target_qubits()
      {
         std::vector<uint64_t> r;
         r.push_back(target_qubit);
         return r;
      }

      gate_type_t type()
      {
         return __toffoli_gate__;
      }

      void dump()
      {
         println(" [-] toffoli(ctrl_qubit_1=" << control_qubit_1 << ", ctrl_qubit_2=" << control_qubit_2 << ", target_qubit=" << target_qubit << ")");
      }
};

/**
 * Worker with a parallel-for signature (chunk start/end/stride):
 * for every index i in [cs,ce) whose bit q is set, swap amplitude i with
 * its partner at the same index with bit q cleared (a bit-flip on qubit q).
 */
int fliper(int cs, int ce, int s, uint64_t q, cvector_t * p_amp)
{
   cvector_t &amp = * p_amp;
   for (int i=cs; i<ce; ++i)
   {
      if (__bit_test(i,q))
         std::swap(amp[i],amp[__bit_flip(i,q)]);
   }
   return 0;
}

// XOR-swap of two __m128d values without a temporary register
// (currently unused: fast_flip below uses std::swap instead).
#define __swap_xmm(x,y) { x = _mm_xor_pd(x,y); y = _mm_xor_pd(y,x); x = _mm_xor_pd(x,y); }

/**
 * Swap every amplitude pair that differs only in bit q, over all 2^n
 * amplitudes (the amplitude permutation of a bit-flip on qubit q).
 * The outer loop walks blocks of 2^(q+1) indices; the inner loop visits the
 * 2^q indices of each block whose bit q is clear and swaps each with its
 * bit-q-set partner.  OpenMP-parallel over the outer blocks when enabled.
 */
void fast_flip(uint64_t q, uint64_t n, cvector_t& amp)
{
   complex_t * x = amp.data();
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
   for (int64_t i=0; i<(int64_t)(1UL << n); i+=(1UL << (q+1)))
      for (size_t j=(size_t)i; j<((size_t)i+(1UL << q)); j++)
         //__swap_xmm(x[j].xmm,x[__bit_flip(j,q)].xmm);
         std::swap(x[j].xmm,x[__bit_flip(j,q)].xmm);
}

/**
 * Bitset-based variant of the same bit-flip permutation on qubit q:
 * enumerates the indices with bit q set via a std::bitset counter and swaps
 * each with its bit-q-cleared partner (the p2<p1 guard keeps each pair from
 * being swapped twice).  Sequential; fast_flip above is the hot path.
 */
void flip(uint64_t q, uint64_t n, cvector_t& amp)
{
   uint64_t nn = (1UL << n);
   uint64_t p1, p2;
   std::bitset<MAX_QB_N> b;
   // perm_t res;
   b.reset();
   b.set(q);
   uint64_t bc = b.to_ulong();
   while (bc < nn)
   {
      b.set(q);
      p1 = b.to_ulong();
      b.flip(q);
      p2 = b.to_ulong();
      if (p2<p1)
         std::swap(amp[p1],amp[p2]);
      b.flip(q);
      b = inc(b);   // project helper -- presumably increments the bitset as a counter; confirm
      b.set(q);
      bc = b.to_ulong();
   }
   //return res;
}

/**
 * \brief identity :
 *
 *     | 1 0 |
 *     | 0 1 |
 *
 * apply() is a no-op on the register state.
 */
class identity : public gate
{
   private:
      uint64_t  qubit;   // target qubit index
      cmatrix_t m;       // 2x2 identity matrix (built but never applied)
   public:
      identity(uint64_t qubit) : qubit(qubit)
      {
         m = build_matrix(identity_c,2);
      }

      int64_t apply(qu_register& qreg)
      {
         return 0;
      }
std::string micro_code() { if (qubit > 2) return "# unsupported operation : qubit out of range"; std::stringstream uc; // uc << pulse_lt[qubit][__x180__] << "\n"; uc << " wait 4 \n"; return uc.str(); } void dump() { println(" [-] identity(qubit=" << qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __identity_gate__; } }; /** * \brief pauli-x : * * | 0 1 | * | 1 0 | * */ class pauli_x : public gate { private: uint64_t qubit; cmatrix_t m; public: pauli_x(uint64_t qubit) : qubit(qubit) { m = build_matrix(pauli_x_c,2); } int64_t apply(qu_register& qreg) { // #define FAST_FLIP #ifdef FAST_FLIP uint64_t qn = qreg.size(); cvector_t& amp = qreg.get_data(); // flip(qubit,qn,amp); fast_flip(qubit,qn,amp); /* xpu::task flip_t(fliper,0,0,0,qubit,&amp); xpu::parallel_for parallel_flip(0,(1 << qn),1,&flip_t); parallel_flip.run(); */ #else uint64_t n = qreg.size(); complex_t * s = qreg.get_data().data(); // cm.dump(); __apply_m(0, (1UL << n), qubit, s, 0, (1UL << qubit), m.m); // sqg_apply(m,qubit,qreg); #endif // FAST_FLIP qreg.flip_binary(qubit); return 0; } std::string micro_code() { /** | wait 5 | x180 q0 --> { pulse 9,0,0 } */ if (qubit > 2) return "# unsupported operation : qubit out of range"; std::stringstream uc; uc << pulse_lt[qubit][__x180__] << "\n"; uc << " wait 4 \n"; return uc.str(); } void dump() { println(" [-] pauli-x(qubit=" << qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __pauli_x_gate__; } }; /** * \brief pauli-y : * * | 0 -i | * | i 0 | */ 
class pauli_y : public gate { private: uint64_t qubit; cmatrix_t m; public: pauli_y(uint64_t qubit) : qubit(qubit) { m = build_matrix(pauli_y_c,2); } int64_t apply(qu_register& qreg) { sqg_apply(m,qubit,qreg); qreg.flip_binary(qubit); return 0; } std::string micro_code() { /** | wait 5 | x180 q0 --> { pulse 9,0,0 } */ if (qubit > 2) return "# unsupported operation : qubit out of range"; std::stringstream uc; uc << pulse_lt[qubit][__y180__] << "\n"; uc << " wait 4 \n"; return uc.str(); } void dump() { println(" [-] pauli-y(qubit=" << qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __pauli_y_gate__; } }; /** * \brief pauli-z : * * | 1 0 | * | 0 -1 | */ class pauli_z : public gate { private: uint64_t qubit; cmatrix_t m; public: pauli_z(uint64_t qubit) : qubit(qubit) { m = build_matrix(pauli_z_c,2); } int64_t apply(qu_register& qreg) { sqg_apply(m,qubit,qreg); return 0; } std::string micro_code() { /** | wait 5 | x180 q0 --> { pulse 9,0,0 } */ if (qubit > 2) return "# unsupported operation : qubit out of range"; std::stringstream uc; uc << pulse_lt[qubit][__y180__] << "\n"; uc << " wait 4 \n"; uc << pulse_lt[qubit][__x180__] << "\n"; uc << " wait 4 \n"; return uc.str(); } void dump() { println(" [-] pauli-z(qubit=" << qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __pauli_z_gate__; } }; /** * \brief phase : * * | 1 0 | * | 0 i | */ class phase_shift : public gate { private: uint64_t qubit; cmatrix_t m; public: phase_shift(uint64_t qubit) : qubit(qubit) { 
m = build_matrix(phase_c,2); } int64_t apply(qu_register& qreg) { sqg_apply(m,qubit,qreg); return 0; } std::string micro_code() { if (qubit > 2) return "# unsupported operation : qubit out of range"; std::stringstream uc; uc << pulse_lt[qubit][__y90__] << "\n"; uc << " wait 4 \n"; uc << pulse_lt[qubit][__x90__] << "\n"; uc << " wait 4 \n"; uc << pulse_lt[qubit][__ym90__] << "\n"; uc << " wait 4 \n"; return uc.str(); } void dump() { println(" [-] phase(qubit=" << qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __phase_gate__; } }; /** * \brief S dag gate */ class s_dag_gate : public gate { private: uint64_t qubit; cmatrix_t m; public: s_dag_gate(uint64_t qubit) : qubit(qubit) { m = build_matrix(sdag_gate_c,2); } int64_t apply(qu_register& qreg) { sqg_apply(m,qubit,qreg); return 0; } void dump() { println(" [-] s_dag_gate(qubit=" << qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __sdag_gate__; } }; /** * \brief T gate */ class t_gate : public gate { private: uint64_t qubit; cmatrix_t m; public: t_gate(uint64_t qubit) : qubit(qubit) { m = build_matrix(t_gate_c,2); } int64_t apply(qu_register& qreg) { sqg_apply(m,qubit,qreg); return 0; } void dump() { println(" [-] t_gate(qubit=" << qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return 
r; }   // closes t_gate::target_qubits() begun in the previous chunk
      gate_type_t type()
      {
         return __t_gate__;
      }
};

/**
 * \brief T dag gate: single-qubit gate whose 2x2 matrix is built from the
 *        project constant tdag_gate_c and applied via sqg_apply.
 */
class t_dag_gate : public gate
{
   private:
      uint64_t  qubit;   // target qubit index
      cmatrix_t m;       // 2x2 gate matrix
   public:
      t_dag_gate(uint64_t qubit) : qubit(qubit)
      {
         m = build_matrix(tdag_gate_c,2);
      }

      int64_t apply(qu_register& qreg)
      {
         sqg_apply(m,qubit,qreg);
         return 0;
      }

      void dump()
      {
         println(" [-] t_dag_gate(qubit=" << qubit << ")");
      }

      std::vector<uint64_t> qubits()
      {
         std::vector<uint64_t> r;
         r.push_back(qubit);
         return r;
      }

      std::vector<uint64_t> control_qubits()
      {
         std::vector<uint64_t> r;
         return r;
      }

      std::vector<uint64_t> target_qubits()
      {
         std::vector<uint64_t> r;
         r.push_back(qubit);
         return r;
      }

      gate_type_t type()
      {
         return __tdag_gate__;
      }
};

/**
 * phase factoring
 * Divides the whole 2x2 matrix by the phase of m(0,0) (or of m(0,1) when
 * m(0,0) is near zero), then rescales each column by its own norm so the
 * columns are renormalized.  Assumes complex_t::norm() returns the modulus
 * |z| (so p below is a unit-phase factor) -- confirm against complex_t.
 */
void reset_gphase(cmatrix_t& m)
{
   double n = m(0,0).norm();
   if (n > 10e-9)   // NOTE(review): 10e-9 is 1e-8; possibly 1e-9 was intended
   {
      complex_t p(m(0,0).re/n,m(0,0).im/n);
      m(0,0) /= p; m(0,1) /= p;
      m(1,0) /= p; m(1,1) /= p;
   }
   else
   {
      n = m(0,1).norm();
      complex_t p(m(0,1).re/n,m(0,1).im/n);
      m(0,0) /= p; m(0,1) /= p;
      m(1,0) /= p; m(1,1) /= p;
   }
   double n1 = std::sqrt(m(0,0).norm()+m(1,0).norm());
   double n2 = std::sqrt(m(0,1).norm()+m(1,1).norm());
   m(0,0) /= n1; m(0,1) /= n2;
   m(1,0) /= n1; m(1,1) /= n2;
}

/**
 * General single-qubit unitary built from three angles (the original
 * comment's symbols were mojibake'd to '?'); the matrix shape matches a
 * u3-style gate:
 *
 *        | cos(theta/2)              -e^(i*lambda) sin(theta/2)       |
 *    u = |                                                            |
 *        | e^(i*phi) sin(theta/2)    e^(i*(phi+lambda)) cos(theta/2)  |
 *
 * NOTE(review): the constructor has two defects that should be confirmed
 * and fixed together with its callers:
 *  1. the parameter 'double angle[3]' shadows the member 'angle', so the
 *     member array is never initialized -- get_angle() and dump() read
 *     indeterminate values;
 *  2. the body indexes angle[1]..angle[3]: angle[3] is one past the end of
 *     a 3-element argument (the intended mapping is probably the 0-based
 *     angle[0]..angle[2], but the theta/phi/lambda order cannot be
 *     determined from this file alone).
 */
class unitary : public gate
{
   private:

      uint64_t  qubit;     // target qubit index
      double    angle[3];  // NOTE(review): never written (see ctor note above)
      cmatrix_t m;         // 2x2 gate matrix

   public:

      unitary(uint64_t qubit, double angle[3]) : qubit(qubit)
      {
         // m.resize(2,2);
         m(0,0) = cos(angle[1]/2);
         m(0,1) = complex_t(-cos(angle[2]/2),-sin(angle[2]/2))*sin(angle[1]/2);
         m(1,0) = complex_t(cos(angle[3]/2),sin(angle[3]/2))*sin(angle[1]/2) ;
         m(1,1) = complex_t(cos((angle[3]/2)+(angle[2]/2)),sin((angle[3]/2)+(angle[2]/2)))*cos(angle[1]/2);
      }

      int64_t apply(qu_register& qreg)
      {
         sqg_apply(m,qubit,qreg);
         qreg.set_measurement_prediction(qubit,__state_unknown__);
         // qreg.set_binary(qubit,__state_unknown__);
         return 0;
      }

      double get_angle()
      {
         return *angle;   // NOTE(review): reads the uninitialized member angle[0]
      }

      void dump()
      {
         // NOTE(review): streams the member array 'angle', i.e. prints a
         // pointer, not the angle values
         println(" [-] unitary(qubit=" << qubit << ", angle=" << angle << ")");
      }
std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __unitary_gate__; } }; /** * \brief rotation-x : */ class rx : public gate { private: uint64_t qubit; double angle; cmatrix_t m; public: rx(uint64_t qubit, double angle) : qubit(qubit), angle(angle) { // m.resize(2,2); m(0,0) = cos(angle/2); m(0,1) = complex_t(0,-sin(angle/2)); m(1,0) = complex_t(0,-sin(angle/2)); m(1,1) = cos(angle/2); reset_gphase(m); } int64_t apply(qu_register& qreg) { sqg_apply(m,qubit,qreg); qreg.set_measurement_prediction(qubit,__state_unknown__); // qreg.set_binary(qubit,__state_unknown__); return 0; } void dump() { println(" [-] rx(qubit=" << qubit << ", angle=" << angle << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __rx_gate__; } }; /** * \brief rotation-y : */ class ry : public gate { private: uint64_t qubit; double angle; cmatrix_t m; public: ry(uint64_t qubit, double angle) : qubit(qubit), angle(angle) { // m.resize(2,2); m(0,0) = cos(angle/2); m(0,1) = -sin(angle/2); m(1,0) = sin(angle/2); m(1,1) = cos(angle/2); // reset_gphase(m); } int64_t apply(qu_register& qreg) { sqg_apply(m,qubit,qreg); qreg.set_measurement_prediction(qubit,__state_unknown__); //qreg.set_binary(qubit,__state_unknown__); return 0; } void dump() { println(" [-] ry(qubit=" << qubit << ", angle=" << angle << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { 
std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __ry_gate__; } }; /** * \brief rotation-z : */ class rz : public gate { private: uint64_t qubit; double angle; cmatrix_t m; public: rz(uint64_t qubit, double angle) : qubit(qubit), angle(angle) { // m.resize(2,2); m(0,0) = complex_t(cos(-angle/2), sin(-angle/2)); m(0,1) = 0; m(1,0) = 0; m(1,1) = complex_t(cos(angle/2), sin(angle/2)); reset_gphase(m); } int64_t apply(qu_register& qreg) { sqg_apply(m,qubit,qreg); qreg.set_measurement_prediction(qubit,__state_unknown__); //qreg.set_binary(qubit,__state_unknown__); return 0; } void dump() { println(" [-] rz(qubit=" << qubit << ", angle=" << angle << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __rz_gate__; } }; void __shift(cvector_t& amp, size_t size, size_t bit, complex_t p, size_t offset=0) { // println("bit=" << bit); // println("ctrl=" << ctrl); complex_t * x = amp.data(); // println(">>>> " << p); for (size_t i=__bit_set(0,bit); i<(1UL<<size); i += (1UL << (bit+1))) for (size_t j=0; j<(1UL<<bit); j++) { size_t v = i+j+offset; // amp[v] *= p; // println(" before mul : " << x[v]); x[v] *= p; // println(" after mul : " << x[v]); // println(" swap(" << std::bitset<16>(v) << "," << std::bitset<16>(__bit_reset(v,trg)) << ")"); } } void __shift(complex_t * x, size_t size, size_t bit, complex_t p, size_t offset=0) { // println("bit=" << bit); // println("ctrl=" << ctrl); for (size_t i=__bit_set(0,bit); i<(1UL<<size); i += (1UL << (bit+1))) for (size_t j=0; j<(1UL<<bit); j++) { size_t v = i+j+offset; // amp[v] *= p; x[v] *= p; // println(" swap(" << std::bitset<16>(v) << "," << std::bitset<16>(__bit_reset(v,trg)) << ")"); } } int shift_worker(int cs, int ce, int s, cvector_t * p_amp, 
                 size_t bit1, size_t bit2, complex_t p)
{
    // Applies phase p to chunk [cs,ce): block i starts at
    // __bit_set(0,bit1) + i * 2^(bit1+1).  's' is unused (stride slot of the
    // old xpu::parallel_for interface).
    cvector_t &amp = * p_amp;
    // xpu::parallel_for fswp(__bit_set(0,b1), (1 << qn), (1 << (b1+1)), &t);
    size_t step=(1UL << (bit1+1));
    size_t b = cs;
    size_t e = ce;
    size_t offset = __bit_set(0,bit1);
    //for (size_t i=__bit_set(0,bit1); i<(1<<size); i += (1 << (bit1+1)))
    //__swap(amp,bit1,bit2,trg,ctrl,i);
    for (size_t i=b; i<e; i++)
        __shift(amp,bit1,bit2,p,offset+(i*step));
    return 0;
}

// First QFT fold: for rows [is,ie) applies the Hadamard kronecker product
// (sparse 2-column row product) into res, then the controlled phase shifts
// for every later qubit j (phase 2*pi / 2^(j-qubit) per row block).
uint64_t qft_1st_fold_worker(uint64_t is, uint64_t ie, uint64_t s, uint64_t n,
                             uint64_t qubit, kronecker_ui m, cvector_t * v, cvector_t * res)
{
    uint64_t k = n-qubit;
    // println("run : " << is << " .. " << ie);
    complex_t * pv = v->data();
    complex_t * pr = res->data();
    size_t bc, c1, c2;
    for (uint64_t r=is; r<ie; ++r)
    {
        bc = r;
        c1 = __bit_reset(bc,n-k);   // column index with the qubit's bit cleared
        c2 = __bit_set(bc,n-k);     // column index with the qubit's bit set
#ifdef __OP_PREFETCH__
        // prefetch the next row's two source amplitudes
        _mm_prefetch((char*)&pv[__bit_reset((bc+1),n-k)],_MM_HINT_T0);
        _mm_prefetch((char*)&pv[__bit_set((bc+1),n-k)],_MM_HINT_T0);
#endif // __OP_PREFETCH__
#ifdef __AVX__ //NO
        xpu::_mm_cmul_add_pd(pv[c1], pv[c2], m.get(r,c1), m.get(r,c2),pr[r]);
#else
        // complex_t s; // = 0;
        //pr[r] = pv[c1]*(m->get(r,c1)) + pv[c2]*(m->get(r,c2));
        pr[r].xmm = _mm_add_pd((pv[c1]*(m.get(r,c1))).xmm, (pv[c2]*(m.get(r,c2))).xmm);
#endif
    }
    // controlled phase shifts relative to 'qubit'
    size_t bit2 = qubit;
    for (size_t j=qubit+1; j<n; ++j)
    {
        complex_t p(cos(QX_PI/(1UL << (j-qubit))), sin(QX_PI/(1UL << (j-qubit))));
        size_t bit1 = j;
        size_t step=(1UL << (bit1+1));
        size_t offset = __bit_set(0,bit1);
        for (size_t i=is; i<ie; i++)
        {
            // println("i=" << i*step);
            __shift(pr,bit1,bit2,p,offset+(i*step));
        }
    }
    return 0;
}

// Driver for the first QFT fold: batches the rows and runs the worker,
// optionally in parallel with OpenMP.
void qft_1st_fold(uint64_t n, uint64_t qubit, kronecker_ui m, cvector_t& v, cvector_t& res)
{
    uint64_t k = n-qubit;
    uint64_t rows = (1UL << n);
    uint64_t z = 0;
    //xpu::task qf_t(qft_fold_worker,0,0,0,n,qubit,m,&v,&res);
    //xpu::parallel_for process(z,rows,1,&qf_t);
    //process.run();
    static const uint64_t SIZE = 1000;   // batch granularity for OpenMP scheduling
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
    // batch end is clamped with std::min below, so the last partial batch is safe
    for (int64_t batch = 0; batch <= rows / SIZE; batch++)
    {
        qft_1st_fold_worker(batch*SIZE,std::min<uint64_t>((batch+1)*SIZE,rows),1,n,qubit,m,&v,&res);
    }
}

// nth QFT fold: same row product + phase shifts as the first fold, but with
// the interleaved kronecker operator (kronecker_iui) for an inner qubit.
uint64_t qft_nth_fold_worker(uint64_t is, uint64_t ie, uint64_t s, uint64_t n,
                             uint64_t qubit, kronecker_iui m, cvector_t * v, cvector_t * res)
{
    uint64_t k = n-qubit;
    // println("run : " << is << " .. " << ie);
    complex_t * pv = v->data();
    complex_t * pr = res->data();
    size_t bc, c1, c2;
    for (uint64_t r=is; r<ie; ++r)
    {
        bc = r;
        c1 = __bit_reset(bc,n-k);   // column with the qubit's bit cleared
        c2 = __bit_set(bc,n-k);     // column with the qubit's bit set
#ifdef __OP_PREFETCH__
        _mm_prefetch((char*)&pv[__bit_reset((bc+1),n-k)],_MM_HINT_T0);
        _mm_prefetch((char*)&pv[__bit_set((bc+1),n-k)],_MM_HINT_T0);
#endif // __OP_PREFETCH__
#ifdef __AVX__ //NO
        xpu::_mm_cmul_add_pd(pv[c1], pv[c2], m.get(r,c1), m.get(r,c2),pr[r]);
#else
        // complex_t s; // = 0;
        //pr[r] = pv[c1]*(m->get(r,c1)) + pv[c2]*(m->get(r,c2));
        pr[r].xmm = _mm_add_pd((pv[c1]*(m.get(r,c1))).xmm, (pv[c2]*(m.get(r,c2))).xmm);
#endif
    }
    size_t bit2 = qubit;
    for (size_t j=qubit+1; j<n; ++j)
    {
        complex_t p(cos(QX_PI/(1UL << (j-qubit))), sin(QX_PI/(1UL << (j-qubit))));
        size_t bit1 = j;
        size_t step=(1UL << (bit1+1));
        size_t offset = __bit_set(0,bit1);
        for (size_t i=is; i<ie; i++)
        {
            __shift(pr,bit1,bit2,p,offset+(i*step));
        }
    }
    return 0;
}

// Driver for the nth QFT fold (batched, optionally OpenMP-parallel).
void qft_nth_fold(uint64_t n, uint64_t qubit, kronecker_iui m, cvector_t& v, cvector_t& res)
{
    uint64_t k = n-qubit;
    uint64_t rows = (1UL << n);
    uint64_t z = 0;
    //xpu::task qf_t(qft_fold_worker,0,0,0,n,qubit,m,&v,&res);
    //xpu::parallel_for process(z,rows,1,&qf_t);
    //process.run();
    static const uint64_t SIZE = 1000;
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
    for (int64_t batch = 0; batch <= rows / SIZE; batch++)
    {
        qft_nth_fold_worker(batch*SIZE,std::min<uint64_t>((batch+1)*SIZE,rows),1,n,qubit,m,&v,&res);
    }
}

// Legacy chunk worker (kronecker_ui variant): row product via rw_process_ui
// followed by the phase-shift sweep.  Only referenced from #if 0 code below.
int qft_worker(int cs, int ce, int s, size_t n, cvector_t& p_in, cvector_t& p_out,
               kronecker_ui kr, size_t qubit)
{
    complex_t * in = p_in.data();
    complex_t * out = p_out.data();
    cvector_t & amp = p_out;
    // xpu::parallel_for fswp(__bit_set(0,b1), (1 << qn), (1 << (b1+1)), &t);
    size_t b = cs;
    size_t e = ce;
    rw_process_ui(cs, ce, s, n, qubit, kr, &p_in, &p_out);   // H
    size_t bit2 = qubit;
    for (size_t j=qubit+1; j<n; ++j)
    {
        complex_t p(cos(QX_PI/(1UL << (j-qubit))), sin(QX_PI/(1UL << (j-qubit))));
        size_t bit1 = j;
        size_t step=(1UL << (bit1+1));
        size_t offset = __bit_set(0,bit1);
        for (size_t i=b; i<e; i++)
        {
            println("i=" << i*step);
            __shift(amp,bit1,bit2,p,offset+(i*step));
        }
    }
    return 0;
}

// Legacy chunk worker (kronecker_iui variant).
// NOTE(review): the early 'return 0;' right after rw_process_iui() makes the
// whole phase-shift loop below unreachable — confirm whether that is a
// deliberate disable or a leftover debug statement.
int qft_worker(int cs, int ce, int s, size_t n, cvector_t& p_in, cvector_t& p_out,
               kronecker_iui kr, size_t qubit)
{
    complex_t * in = p_in.data();
    complex_t * out = p_out.data();
    cvector_t & amp = p_out;
    // xpu::parallel_for fswp(__bit_set(0,b1), (1 << qn), (1 << (b1+1)), &t);
    size_t b = cs;
    size_t e = ce;
    rw_process_iui(cs, ce, s, n, qubit, kr, &p_in, &p_out);   // H
    return 0;
    // --- dead code below (see NOTE above) ---
    size_t bit2 = qubit;
    for (size_t j=qubit+1; j<n; ++j)
    {
        complex_t p(cos(QX_PI/(1UL << (j-qubit))), sin(QX_PI/(1UL << (j-qubit))));
        size_t bit1 = j;
        size_t step=(1UL << (bit1+1));
        size_t offset = __bit_set(0,bit1);
        for (size_t i=b; i<e; i++)
        {
            __shift(p_out,bit1,bit2,p,offset+(i*step));
        }
    }
    return 0;
}

/**
 * \brief qft : quantum Fourier transform over the given qubit list,
 *        implemented as one "1st fold" followed by n-2 "nth folds",
 *        ping-ponging between the register's data and aux buffers.
 */
class qft : public gate
{
private:
    std::vector<uint64_t> qubit;   // qubits the transform spans
    cmatrix_t hm;                  // cached Hadamard matrix
public:
    qft(std::vector<uint64_t> qubit) : qubit(qubit)
    {
        hm = build_matrix(hadamard_c,2);
    }
    int64_t apply(qu_register& qreg)
    {
        size_t n = qreg.size();
        size_t s = qreg.states();
        cvector_t& in = qreg.get_data();
        cvector_t& out = qreg.get_aux();
        // kronecker_ui kui(hm,2,(1 << (n-1)));
        kronecker_ui kui(hadamard_c,2,(1 << (n-1)));
        qft_1st_fold(n, 0, kui, in, out);
        for (size_t i=1; i<n-1; ++i)
        {
            size_t q = qubit[i];
            // kronecker_iui kiui(hm, 2, (1 << (n-q-1)), (1 << (q)));
            kronecker_iui kiui(hadamard_c, 2, (1UL << (n-q-1)), (1UL << (q)));
            qft_nth_fold(n, 0, kiui, in, out);
        }
        in.swap(out);   // result ends up in the register's data buffer
        return 0;
#if 0
        // 1st fold
        qft_worker(0, s, 1, n, in, out, kronecker_ui(m,2,s-2), 0);
        return 0;
        // ith fold
        for (size_t i=1; i<qubit.size(); ++i)
        {
            size_t q = qubit[i];
            kronecker_iui k(m, 2,
                            (1UL << (n-q-1)), (1UL << (q)));
            qft_worker(0, qreg.states(), 1, qreg.size(), (qreg.get_data()), (qreg.get_aux()), k, q);
        }
        // last fold
        kronecker_iu k(m,2,(1UL << (n-1)));
        sparse_mulmv(n,qubit[n-1],k,qreg.get_data(),qreg.get_aux());
        in.swap(out);
        return 0;
#endif
    }
    void dump()
    {
        print(" [-] qft(");
        for (size_t i=0; i<(qubit.size()-1); ++i)
            print("q" << qubit[i] << ",");
        println("q" << qubit[qubit.size()-1] << ")");
    }
    std::vector<uint64_t> qubits() { return qubit; }
    std::vector<uint64_t> control_qubits() { return qubit; }
    std::vector<uint64_t> target_qubits() { return qubit; }
    gate_type_t type() { return __qft_gate__; }
};

/**
 * phase shifter
 * Applies the 2x2 matrix m to every amplitude pair (ind0, ind0+ish) selected
 * by the three nested strided loops; the i* parameters encode the control /
 * target bit strides chosen by the caller.
 */
void __apply_cm(complex_t * state, complex_t m[2][2],
                std::size_t i11, std::size_t i12, std::size_t i13,
                std::size_t i21, std::size_t i22, std::size_t i23,
                std::size_t i31, std::size_t i32, std::size_t ish )
{
    // hoist the matrix entries out of the loops
    complex_t m00 = m[0][0], m01 = m[0][1], m10 = m[1][0], m11 = m[1][1];
    for(std::size_t r1 = i11; r1 < i12; r1 += i13)
    {
#ifdef USE_OPENMP
// #pragma omp parallel for
#endif
        for(std::size_t r2 = r1 + i21; r2 < r1 + i22; r2 += i23)
        {
            for(std::size_t ind0 = r2 + i31; ind0 < r2 + i32; ind0++)
            {
                std::size_t ind1 = ind0 + ish;
                complex_t in0 = state[ind0], in1 = state[ind1];
                state[ind0] = m00 * in0 + m01 * in1;
                state[ind1] = m10 * in0 + m11 * in1;
            }
        }
    }
}

/**
 * \brief controlled phase shift by arbitrary phase angle or (2*pi/(2^(k=ctrl-target)))
 */
class ctrl_phase_shift : public gate
{
private:
    uint64_t ctrl_qubit;     // control qubit index
    uint64_t target_qubit;   // target qubit index
    complex_t z;             // phase factor (only used for dump output)
    complex_t m[2][2];       // cached 2x2 operator
    double phase;            // shift angle in radians
protected:
    // Build diag(e^{-i phase/2}, e^{+i phase/2}) and divide out the global
    // phase so that m(0,0) becomes real.
    void build_operator()
    {
        m[0][0] = complex_t(cos(-phase/2), sin(-phase/2));
        m[0][1] = 0.0;
        m[1][0] = 0.0;
        m[1][1] = complex_t(cos(phase/2), sin(phase/2));
        double n = m[0][0].norm();
        if (n > 10e-9)
        {
            complex_t p(m[0][0].re/n,m[0][0].im/n);
            m[0][0] /= p;
            m[0][1] /= p;
            m[1][0] /= p;
            m[1][1] /= p;
        }
        else
        {
            // NOTE(review): this branch normalizes by m[0][1]'s magnitude but
            // still builds p from m[0][0] — looks like a copy-paste slip
            // (m[0][1] intended?).  For this diagonal operator m[0][1] is
            // always 0, so the branch appears unreachable anyway — confirm.
            n = m[0][1].norm();
            complex_t p(m[0][0].re/n,m[0][0].im/n);
            m[0][0] /= p;
            m[0][1] /= p;
            m[1][0] /= p;
            m[1][1] /= p;
        }
    }
public:
    /**
     *
     ctor (q) — phase 2*pi / 2^(ctrl-target), the classic QFT phase */
    ctrl_phase_shift(uint64_t ctrl_qubit, uint64_t target_qubit)
        : ctrl_qubit(ctrl_qubit), target_qubit(target_qubit), z(0.0, 0.0)
    {
        phase = 2*QX_PI/(1UL << (ctrl_qubit - target_qubit));
        build_operator();
    }
    /**
     * ctor (k) — phase 2*pi / 2^k
     */
    ctrl_phase_shift(uint64_t ctrl_qubit, uint64_t target_qubit, size_t k)
        : ctrl_qubit(ctrl_qubit), target_qubit(target_qubit)
    {
        phase = 2*QX_PI/(1UL << k);
        build_operator();
    }
    /**
     * ctor (p) — explicit phase angle in radians
     */
    ctrl_phase_shift(uint64_t ctrl_qubit, uint64_t target_qubit, double angle)
        : ctrl_qubit(ctrl_qubit), target_qubit(target_qubit)
    {
        phase = angle;
        build_operator();
    }
    int64_t apply(qu_register& qreg)
    {
        uint64_t n = qreg.size();
        complex_t * s = qreg.get_data().data();
        size_t c = ctrl_qubit;
        size_t t = target_qubit;
        // The stride arguments depend on which of control/target is the
        // higher bit; __apply_cm touches only pairs where the control is set.
        if (c > t)
            __apply_cm(qreg.get_data().data(), m, 0UL, (1UL << n), 1UL << (c+1l),
                       1UL << c, 1UL << (c+1UL), 1UL << (t+1UL),
                       0UL, 1UL << t, 1UL << t);
        else
            __apply_cm(qreg.get_data().data(), m, 0UL, (1UL << n), 1UL << (t+1UL),
                       0UL, 1UL << t, 1UL << (c+1l),
                       1UL << c, 1UL<< (c+1UL), 1UL << t);
        return 0;
    }
    void dump()
    {
        // NOTE(review): z is never assigned after construction, so the phase
        // printed here is always (0, 0) — confirm whether 'phase' was meant.
        println(" [-] ctrl_phase_shift(ctrl_qubit=" << ctrl_qubit << ", target_qubit: " << target_qubit << ", phase = (" << z.re << ", i."
                << z.im << ") )");
    }
    std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(ctrl_qubit); r.push_back(target_qubit); return r; }
    std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; r.push_back(ctrl_qubit); return r; }
    std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(target_qubit); return r; }
    gate_type_t type() { return __ctrl_phase_shift_gate__; }
};

/**
 * \brief swap : exchanges the states of two qubits, implemented as the
 *        standard three-CNOT decomposition.
 *
 *    | 1 0 0 0 |
 *    | 0 0 1 0 |
 *    | 0 1 0 0 |
 *    | 0 0 0 1 |
 */
class swap : public gate
{
private:
    uint64_t qubit1;
    uint64_t qubit2;
    // cmatrix_t m;
public:
    swap(uint64_t qubit1, uint64_t qubit2) : qubit1(qubit1), qubit2(qubit2)
    {
        // m = build_matrix(swap_c,4);
    }
    int64_t apply(qu_register& qreg)
    {
        // SWAP(a,b) = CNOT(a,b) CNOT(b,a) CNOT(a,b)
        cnot(qubit1,qubit2).apply(qreg);
        cnot(qubit2,qubit1).apply(qreg);
        cnot(qubit1,qubit2).apply(qreg);
        return 0;
    }
    void dump()
    {
        println(" [-] swap(q1=" << qubit1 << ", q2=" << qubit2 << ")");
    }
    std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit1); r.push_back(qubit2); return r; }
    std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; }
    std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit1); r.push_back(qubit2); return r; }
    gate_type_t type() { return __swap_gate__; }
};

/**
 * \brief cphase : controlled phase, realized as H(target) CNOT H(target).
 */
class cphase : public gate
{
private:
    uint64_t ctrl_qubit;
    uint64_t target_qubit;
public:
    cphase(uint64_t ctrl_qubit, uint64_t target_qubit) : ctrl_qubit(ctrl_qubit), target_qubit(target_qubit)
    {
    }
    int64_t apply(qu_register& qreg)
    {
        hadamard(target_qubit).apply(qreg);
        cnot(ctrl_qubit,target_qubit).apply(qreg);
        hadamard(target_qubit).apply(qreg);
        return 0;
    }
    void dump()
    {
        println(" [-] cphase(ctrl_qubit=" << ctrl_qubit << ", target_qubit=" << target_qubit << ")");
    }
    std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(ctrl_qubit); r.push_back(target_qubit); return r; }
    std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; r.push_back(ctrl_qubit); return r; }
    std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(target_qubit); return r; }
    gate_type_t type() { return __cphase_gate__; }
};

/**
 * \brief custom matrix gate : applies a caller-supplied 2x2 matrix to one
 *        qubit.  The (commented-out) code below used to verify unitarity.
 */
class custom : public gate
{
private:
    // std::vector<uint64_t> qubits;
    uint64_t qubit;   // target qubit
    cmatrix_t m;      // user-supplied matrix (not validated — see below)
public:
    // #ifdef __BUILTIN_LINALG__
    // custom(std::vector<uint64_t> qubits, qx::linalg::matrix<complex_t> m) : qubits(qubits), m(m)
    // #else
    custom(uint64_t qubit, cmatrix_t m) : qubit(qubit), m(m)
    // #endif
    {
        // uint64_t size = 1 << qubits.size();
        // if (size != m.size1() || size != m.size2())
        //    println("[x] error: cutom gate : the matrix size do not match the number of qubits !");
        // verify also that the matrix is unitary
        // #ifdef __BUILTIN_LINALG__
        // cmatrix_t ctr(m.size2(),m.size1());
        // qx::linalg::matrix<complex_t> ctr(m.size2(),m.size1());
        // for (uint64_t i=0; i<m.size2(); ++i)
        //    for (uint64_t j=0; j<m.size1(); ++j)
        //       ctr(i,j) = m(j,i).conj();
        // // cmatrix_t mxctr = mxm(m,ctr);
        // qx::linalg::matrix<complex_t> mxctr = mxm(m,ctr);
        // qx::linalg::identity_matrix<complex_t> id(m.size1());
        // #else
        // cmatrix_t mxctr = mxm(m,ublas::trans(conj(m)));
        // ublas::identity_matrix<complex_t> id(m.size1());
        // #endif
        // #ifdef __BUILTIN_LINALG__
        // if (qx::linalg::equals(mxctr,id))
        // #else
        // if (equals(mxctr,id))
        // #endif
        //    println("[x] error: custom gate : the specified matrix is not unitary !");
    }
    /**
     * apply — single-qubit matrix application; prediction becomes unknown.
     */
    int64_t apply(qu_register& qreg)
    {
        sqg_apply(m,qubit,qreg);
        qreg.set_measurement_prediction(qubit,__state_unknown__);
        return 0;
    }
    /**
     * dump
     */
    void dump()
    {
        println(" [-] custom matrix on qubit " << qubit);
        // println(" [-] custom(qubits=" << qubits << ", matrix=" << m << ")");
    }
    /**
     * type
     */
    gate_type_t type() { return __custom_gate__; }
};

// Partial probability-of-|1> sum for chunk [cs,ce): accumulates the squared
// magnitudes of the amplitudes whose 'qubit' bit is set.
double p1_worker(uint64_t cs, uint64_t ce, uint64_t qubit, cvector_t * p_data)
{
    cvector_t &data = * p_data;
    double local_p1 = 0;
    uint64_t ref = 1UL << qubit;
    uint64_t offset = 0;
    // We need to calculate the "offset_start" in order to maintain the
    // correctness of the index calculation in the parallel region
    uint64_t reminder = cs % ref;
    uint64_t factor = std::floor((cs - reminder) / ref);
    uint64_t offset_start = factor * ref;
    offset = offset_start;
    /* ******************************************************************************* */
    // The following for-loop is a decimal-based representation of the identical binary-
    // based for-loop:
    //    uint64_t size = qreg.size();
    //    uint64_t n = (1 << size);
    //    std::bitset<MAX_QB_N> b;
    //    b.reset();
    //    b.set(qubit);
    //    for (uint64_t i = b.to_ulong(); i < n; i=b.to_ulong()) {
    //       p += data[i].norm();
    //       b = inc(b);
    //       b.set(qubit);
    //    }
    /* ******************************************************************************* */
    for (uint64_t i = cs; i < ce; ++i)
    {
        // advance the offset each time we cross a 2^qubit block boundary
        if (!(i % ref))
            offset = ref + i;
        local_p1 += data[i + offset].norm();
    }
    return local_p1;
}

// Sum of squared magnitudes over [cs,ce), vectorized with AVX/SSE when
// available.  Used after zeroing to compute the renormalization length.
inline double zero_worker_norm(uint64_t cs, uint64_t ce, cvector_t * p_data)
{
    uint64_t num_elts = ce - cs;
    uint64_t tile_size = std::min<uint64_t>(num_elts, 32UL);
    complex_t * vd = p_data->data();
    double local_length = 0.;
#if defined(__AVX__)
    // 4 doubles (2 complex) per iteration; horizontal add at the end
    __m256d sum = _mm256_set1_pd(0.0);
    for (uint64_t i=cs; i<ce; i+=tile_size)
    {
        for (uint64_t j=i, end=std::min(ce,tile_size+i); j<end; j+=2)
        {
            double * pvd = (double*)&vd[j];
            sum = _mm256_add_pd(sum, _mm256_mul_pd(_mm256_load_pd(pvd), _mm256_load_pd(pvd)));
        }
    }
    __m256d r2 = _mm256_hadd_pd(sum, sum);
    local_length = _mm_cvtsd_f64(_mm_add_pd(_mm256_extractf128_pd(r2, 1), _mm256_castpd256_pd128(r2)));
#elif defined(__SSE__)
    __m128d sum = _mm_set1_pd(0.0);
    for (uint64_t i=cs; i<ce; i+=tile_size)
    {
        for (uint64_t j=i, end=std::min(ce,tile_size+i); j<end; ++j)
        {
            double * pvd = (double*)&vd[j];
            sum = _mm_add_pd(sum, _mm_mul_pd(_mm_load_pd(pvd), _mm_load_pd(pvd)));
        }
    }
    local_length = _mm_cvtsd_f64(_mm_hadd_pd(sum, sum));
#else
    // NOTE(review): this scalar fallback references 'data', which is not
    // declared in this function (only 'vd' is) — it cannot compile when
    // neither __AVX__ nor __SSE__ is defined.  Confirm and fix upstream.
    for (uint64_t i=cs; i<ce; i+=tile_size)
    {
        for (uint64_t j=i, end=std::min(ce,tile_size+i); j<end; j+=2)
        {
            local_length += data[j].norm() +
                data[j+1].norm();
        }
    }
#endif
    return local_length;
}

// Collapse helper for measurement outcome 1: zero every amplitude whose
// 'qubit' bit is CLEAR... actually zeros amplitudes whose bit is SET (see
// call sites: _true is used for outcome 0).  Returns the residual norm sum.
inline double zero_worker_true(uint64_t cs, uint64_t ce, uint64_t s, /*double * length,*/
                               uint64_t qubit, /*xpu::lockable * l, */cvector_t * p_data)
{
    cvector_t &data = * p_data;
    uint64_t pos = 1UL << qubit;
    for (uint64_t i=cs; i<ce; i+=2)
    {
        // ((x) & (1<<(pos)))
        if (i & pos)
            data[i] = 0.0;
        if ((i+1) & pos)
            data[i+1] = 0.0;
    }
    return zero_worker_norm(cs, ce, p_data);
}

// Complement of zero_worker_true: zeros amplitudes whose 'qubit' bit is
// clear, then returns the residual norm sum for chunk [cs,ce).
inline double zero_worker_false(uint64_t cs, uint64_t ce, uint64_t s, /*double * length,*/
                                uint64_t qubit, /*xpu::lockable * l, */cvector_t * p_data)
{
    cvector_t &data = * p_data;
    uint64_t pos = 1UL << qubit;
    for (uint64_t i=cs; i<ce; i+=2)
    {
        // ((x) & (1<<(pos)))
        if (!(i & pos))
        {
            data[i] = 0.0;
        }
        if (!((i+1) & pos))
            data[i+1] = 0.0;
    }
    return zero_worker_norm(cs, ce, p_data);
}

// Renormalization worker: multiplies every amplitude in [cs,ce) by 1/length,
// using AVX-512 / AVX / SSE stores when available.
int renorm_worker(uint64_t cs, uint64_t ce, uint64_t s, double * length, cvector_t * p_data)
{
    cvector_t &data = * p_data;
    double l = *length;
    double l_rec = 1./l;   // hoist the reciprocal out of the loops
    uint64_t num_elts = ce - cs;
    uint64_t tile_size = std::min<uint64_t>(num_elts, 16UL);
    complex_t * vd = p_data->data();
#ifdef __AVX512F__
    __m512d vl = _mm512_set1_pd(l_rec);
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
    for (uint64_t i=cs; i<ce; i+=tile_size)
    {
        for (uint64_t j=i, end=std::min(ce,tile_size+i); j<end; j+=4)
        {
            double * pvd = (double*)&vd[j];
            _mm512_store_pd(pvd, _mm512_mul_pd(_mm512_load_pd(pvd), vl));
        }
    }
#elif defined(__AVX__)
    __m256d vl = _mm256_set1_pd(l_rec);
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
    for (int64_t i=cs; i<(int64_t)ce; i+=tile_size)
    {
        for (uint64_t j=(uint64_t)i, end=std::min(ce,tile_size+(uint64_t)i); j<end; j+=2)
        {
            double * pvd = (double*)&vd[j];
            _mm256_store_pd(pvd, _mm256_mul_pd(_mm256_load_pd(pvd), vl));
        }
    }
#elif defined(__SSE__)
    __m128d vl = _mm_set1_pd(l_rec);
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
    for (int64_t i=cs; i<(int64_t)ce; i+=tile_size)
    {
        for (uint64_t j=(uint64_t)i, end=std::min(ce,tile_size+(uint64_t)i); j<end; ++j)
        {
            double * pvd =
                (double*)&vd[j];
            _mm_store_pd(pvd, _mm_mul_pd(_mm_load_pd(pvd), vl));
        }
    }
#else
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
    for (int64_t i=cs; i<(int64_t)ce; i+=tile_size)
    {
        for (uint64_t j=(uint64_t)i, end=std::min(ce,tile_size+(uint64_t)i); j<end; ++j)
        {
            data[j] *= l_rec;
        }
    }
#endif
    // // Update the remaining elements if there are any
    // uint64_t reminder = num_elts % tile_size;
    // if (reminder) {
    //    for (uint64_t i=ce-reminder; i<ce; ++i)
    //    {
    //       data[i] *= l_rec;
    //    }
    // }
    return 0;
}

/**
 * measure : projective measurement of one qubit (or of the whole register)
 * in the computational basis, followed by state collapse + renormalization.
 */
class measure : public gate
{
private:
    uint64_t qubit;            // measured qubit (ignored when measure_all)
    bool measure_all;          // true: measure every qubit of the register
    bool disable_averaging;    // skip the measurement-averaging counters
public:
    measure(uint64_t qubit, bool disable_averaging=false) : qubit(qubit), measure_all(false), disable_averaging(disable_averaging)
    {
    }
    // NOTE(review): this ctor leaves disable_averaging uninitialized; the
    // measure_all path returns before reading it, but initializing it would
    // be safer — confirm.
    measure() : qubit(0), measure_all(true)
    {
    }
    int64_t apply(qu_register& qreg)
    {
        if (measure_all)
        {
            // qreg.measure();
            // measure each qubit in turn with single-qubit measures
            for (size_t q=0; q<qreg.size(); q++)
                qx::measure(q).apply(qreg);
            return 0;
        }
        double f = qreg.rand();   // uniform sample deciding the outcome
        double p = 0;             // accumulated probability of reading |1>
        int64_t value;
        uint64_t size = qreg.size();
        uint64_t n = (1UL << size);
        cvector_t& data = qreg.get_data();
        double length = 0;        // post-collapse norm, for renormalization
        // Basically, this "if" operator determines what to do if we have more than 64 qubits.
        // It also determines whether to invoke parallel or sequential computations. As of now,
        // we set parallel execution as the default one.
        if (1)//size > 64)
        // if (size > 64)
        {
            // #define PARALLEL_MEASUREMENT
            // #ifdef PARALLEL_MEASUREMENT
            /*xpu::lockable * l = new xpu::core::os::mutex();
            xpu::task p1_worker_t(p1_worker, (uint64_t)0, n, (uint64_t)1, &p, qubit, l, &data);
            xpu::parallel_for parallel_p1( (uint64_t)0, n, (uint64_t)1, &p1_worker_t);
            parallel_p1.run();*/
            static const uint64_t SIZE = 1000;   // OpenMP batch granularity
            uint64_t ref = 1UL << qubit;
            uint64_t range = (n >> 1);           // p1_worker scans half the state space
#ifdef USE_OPENMP
#pragma omp parallel for reduction(+: p)
#endif
            for (int64_t batch = 0; batch <= (int64_t)range / SIZE; batch++)
            {
                p += p1_worker(batch*SIZE, std::min<uint64_t>((batch+1)*SIZE, range), qubit, &data);
            }
            // sample the outcome: |1> with probability p
            if (f<p) value = 1;
            else value = 0;
#ifdef USE_OPENMP
#pragma omp parallel
            {
#endif
                if (value)
                {
                    // outcome 1: zero all amplitudes with the qubit bit clear
#ifdef USE_OPENMP
#pragma omp for reduction(+: length)
#endif
                    for (int64_t batch = 0; batch <= (int64_t)n / SIZE; batch++)
                    {
                        length += zero_worker_false(batch*SIZE, std::min<uint64_t>((batch+1)*SIZE,n), (uint64_t)1, qubit, &data);
                    }
                }
                else
                {
                    // outcome 0: zero all amplitudes with the qubit bit set
#ifdef USE_OPENMP
#pragma omp for reduction(+: length)
#endif
                    for (int64_t batch = 0; batch <= (int64_t)n / SIZE; batch++)
                    {
                        length += zero_worker_true(batch*SIZE, std::min<uint64_t>((batch+1)*SIZE,n), (uint64_t)1, qubit, &data);
                    }
                }
#ifdef USE_OPENMP
            }
#endif
            length = std::sqrt(length);
            renorm_worker((uint64_t)0, n, (uint64_t)1, &length, &data);
        }
        else
        {
            //#else
            // Sequential fallback (bitset-based index walk).
            // NOTE(review): k, l, m, j and fvalue are declared but unused here.
            int64_t k, l, m;
            int64_t j = qubit;
            double fvalue;
            std::bitset<MAX_QB_N> b;
            b.reset();
            b.set(qubit);
            uint64_t bc = b.to_ulong();
            // sum |amplitude|^2 over all basis states with the qubit bit set
            while (bc < n)
            {
                bc = b.to_ulong();
                p += data[bc].norm();
                b = inc(b);
                b.set(qubit);
                bc = b.to_ulong();
            }
            if (f<p) value = 1;
            else value = 0;
            if (value) // 1
            {
                // reset all states where the qubit is 0
                for (uint64_t i=0; i<(1UL << size); ++i)
                {
                    if (!__bit_test(i,qubit))
                        data[i] = 0.0;
                }
            }
            else
            {
                for (uint64_t i=0; i<(1UL << size); ++i)
                {
                    if (__bit_test(i,qubit))
                        data[i] = 0.0;
                }
            }
            // renormalize the collapsed state
            for (uint64_t k = 0; k < (1UL << size); k++)
                length += data[k].norm(); //std::norm(data[k]);
            length = std::sqrt(length);
            for (uint64_t k =
                 0; k < (1UL << size); k++)
                data[k] /= length;
            // #endif // PARALLEL_MEASUREMENT
        }
        // println(" [>] measured value : " << value);
        // record the classical outcome on the register
        qreg.set_measurement_prediction(qubit,(value == 1 ? __state_1__ : __state_0__));
        qreg.set_measurement(qubit,(value == 1 ? true : false));
        //qreg.set_binary(qubit,(value == 1 ? __state_1__ : __state_0__));
        if (!disable_averaging)
        {
            if (qreg.measurement_averaging_enabled)
            {
                if (value == 1)
                {
                    // println("> exited_states++");
                    qreg.measurement_averaging[qubit].exited_states++;
                }
                else
                {
                    // println("> ground_states++");
                    qreg.measurement_averaging[qubit].ground_states++;
                }
            }
        }
        return value;
    }
    void dump()
    {
        if (measure_all)
            println(" [-] measure(register)");
        else
            println(" [-] measure(qubit=" << qubit << ")");
    }
    std::vector<uint64_t> qubits()
    {
        std::vector<uint64_t> r;
        if (!measure_all)
            r.push_back(qubit);
        else // this is a dirty hack, itshould be fixed later (unknown qubit number !)
        {
            for (int64_t i=0; i<MAX_QB_N; ++i)
                r.push_back(i);
        }
        return r;
    }
    std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; }
    std::vector<uint64_t> target_qubits() { return qubits(); }
    gate_type_t type()
    {
        if (measure_all)
            return __measure_reg_gate__;
        else
            return __measure_gate__;
    }
};

/**
 * measure_x : X-basis measurement, realized as H . measure . H.
 */
class measure_x : public gate
{
private:
    uint64_t qubit;
    bool measure_all;
    bool disable_averaging;   // NOTE(review): left uninitialized by the default ctor
    qx::hadamard hg;          // basis-change gate (reused)
    qx::measure mg;           // underlying Z-basis measurement
public:
    measure_x(uint64_t qubit, bool disable_averaging=false) : qubit(qubit), measure_all(false), hg(qubit), mg(qubit), disable_averaging(disable_averaging)
    {
    }
    measure_x() : qubit(0), hg(qubit), mg(qubit), measure_all(true)
    {
    }
    int64_t apply(qu_register& qreg)
    {
        int64_t r = 0;
        if (measure_all)
        {
            // rotate every qubit into the X basis, measure, rotate back
            for (size_t i=0; i<qreg.size(); ++i)
                qx::hadamard(i).apply(qreg);
            qreg.measure();
            for (size_t i=0; i<qreg.size(); ++i)
                qx::hadamard(i).apply(qreg);
            return 0;
        }
        hg.apply(qreg);
        r = mg.apply(qreg);
        hg.apply(qreg);
        return r;
    }
    void dump()
    {
        if (measure_all)
            println(" [-] measure_x(register)");
        else
            println(" [-] measure_x(qubit=" << qubit << ")");
    }
    std::vector<uint64_t> qubits()
    {
        std::vector<uint64_t> r;
        if (!measure_all)
            r.push_back(qubit);
        else // this is a dirty hack, itshould be fixed later (unknown qubit number !)
        {
            for (int64_t i=0; i<MAX_QB_N; ++i)
                r.push_back(i);
        }
        return r;
    }
    std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; }
    std::vector<uint64_t> target_qubits() { return qubits(); }
    gate_type_t type()
    {
        if (measure_all)
            return __measure_x_reg_gate__;
        else
            return __measure_x_gate__;
    }
};

/**
 * measure_y : Y-basis measurement, realized as S . Z . measure_x . S.
 */
class measure_y : public gate
{
private:
    uint64_t qubit;
    bool measure_all;
    bool disable_averaging;   // NOTE(review): left uninitialized by the default ctor
    qx::phase_shift sg;       // S gate
    qx::pauli_z zg;           // Z gate
    qx::measure_x mg;         // X-basis measurement building block
    /*
       S(qubit); Z(qubit);
       bool b = MeasX(qubit, randint);
       S(qubit);
    */
public:
    measure_y(uint64_t qubit, bool disable_averaging=false) : qubit(qubit), measure_all(false), sg(qubit), zg(qubit), mg(qubit), disable_averaging(disable_averaging)
    {
    }
    measure_y() : qubit(0), sg(qubit), zg(qubit), mg(), measure_all(true)
    {
    }
    int64_t apply(qu_register& qreg)
    {
        int64_t r = 0;
        if (measure_all)
        {
            for (size_t i=0; i<qreg.size(); ++i)
            {
                qx::phase_shift(i).apply(qreg);
                qx::pauli_z(i).apply(qreg);
            }
            mg.apply(qreg);
            for (size_t i=0; i<qreg.size(); ++i)
                qx::phase_shift(i).apply(qreg);
            return 0;
        }
        sg.apply(qreg);
        zg.apply(qreg);
        r = mg.apply(qreg);
        sg.apply(qreg);
        return r;
    }
    void dump()
    {
        if (measure_all)
            println(" [-] measure_y(register)");
        else
            println(" [-] measure_y(qubit=" << qubit << ")");
    }
    std::vector<uint64_t> qubits()
    {
        std::vector<uint64_t> r;
        if (!measure_all)
            r.push_back(qubit);
        else // this is a dirty hack, itshould be fixed later (unknown qubit number !)
        {
            for (int64_t i=0; i<MAX_QB_N; ++i)
                r.push_back(i);
        }
        return r;
    }
    std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; }
    std::vector<uint64_t> target_qubits() { return qubits(); }
    gate_type_t type()
    {
        if (measure_all)
            return __measure_y_reg_gate__;
        else
            return __measure_y_gate__;
    }
};

/**
 * \brief generic binary controlled gate : applies the wrapped gate only when
 *        every listed classical measurement bit reads true.
 */
class bin_ctrl : public gate
{
private:
    // uint64_t bit;
    std::vector<size_t> bits;   // classical bits that gate the application
    gate * g;                   // wrapped gate (raw pointer, not owned here)
public:
    bin_ctrl(size_t bit, gate * g) : g(g)
    {
        bits.push_back(bit);
    }
    bin_ctrl(std::vector<size_t> bit, gate * g) : g(g)
    {
        for (auto b : bit)
            bits.push_back(b);
    }
    int64_t apply(qu_register& qreg)
    {
        // apply only if every control bit tested true on the register
        bool m = true;
        for (auto b : bits)
            if (!qreg.test(b))
                m = false;
        if (m)
            g->apply(qreg);
        return 0;
    }
    gate * get_gate() { return g; }
    std::vector<size_t> get_bits() { return bits; }
    void dump()
    {
        print(" [-] bin_ctrl: \n bit=" << bits[0] << " -> ");
        g->dump();
    }
    std::vector<uint64_t> qubits() { return g->qubits(); }
    std::vector<uint64_t> control_qubits() { return g->control_qubits(); }
    std::vector<uint64_t> target_qubits() { return g->target_qubits(); }
    gate_type_t type() { return __bin_ctrl_gate__; }
};

// Convenience constructors for classically-controlled Paulis.
// NOTE(review): the 'new pauli_*' allocation is never deleted by bin_ctrl —
// confirm ownership expectations at the call sites.
#define bin_ctrl_pauli_x(b,q) bin_ctrl(b,new pauli_x(q))
#define bin_ctrl_pauli_y(b,q) bin_ctrl(b,new pauli_y(q))
#define bin_ctrl_pauli_z(b,q) bin_ctrl(b,new pauli_z(q))

/**
 * \brief classical binary not gate : flips a classical measurement bit.
 */
class classical_not : public gate
{
private:
    uint64_t bit;   // classical bit to flip
public:
    classical_not(uint64_t bit) : bit(bit)
    {
    }
    int64_t apply(qu_register& qreg)
    {
        qreg.flip_measurement(bit);
        return 0;
    }
    uint64_t get_bit() { return bit; }
    void dump()
    {
        // println(" [-] classical not gate: \n bit=" << bit);
        println(" [-] not " << bit);
    }
    std::vector<uint64_t> qubits() { return std::vector<uint64_t>(); }
    std::vector<uint64_t> control_qubits() { return std::vector<uint64_t>(); }
    std::vector<uint64_t> target_qubits() { return std::vector<uint64_t>(); }
    gate_type_t type() { return __classical_not_gate__; }
};

/**
 * prepz : prepare |0> by measuring then conditionally flipping.
 */
class prepz : public
    gate
{
private:
    uint64_t qubit;   // qubit to reset to |0>
public:
    prepz(uint64_t qubit) : qubit(qubit)
    {
    }
    int64_t apply(qu_register& qreg)
    {
        // measure (averaging disabled), then flip back to |0> if we got |1>
        measure(qubit,true).apply(qreg);
        bin_ctrl_pauli_x(qubit,qubit).apply(qreg);
        // bin_ctrl_pauli_z(qubit,qubit).apply(qreg);
        qreg.set_measurement(qubit,false);
        return 0;
    }
    void dump()
    {
        println(" [-] prepz(qubit=" << qubit << ")");
    }
    std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; }
    std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; }
    std::vector<uint64_t> target_qubits() { return qubits(); }
    gate_type_t type() { return __prepz_gate__; }
};

/**
 * prepx : prepare |+> (X-basis ground state) via H . measure . H and a
 *         conditional Z correction.
 */
class prepx : public gate
{
private:
    uint64_t qubit;
    hadamard h;   // cached basis-change gate
public:
    prepx(uint64_t qubit) : qubit(qubit), h(qubit)
    {
    }
    int64_t apply(qu_register& qreg)
    {
        h.apply(qreg);
        measure(qubit,true).apply(qreg);
        h.apply(qreg);
        bin_ctrl_pauli_z(qubit,qubit).apply(qreg);
        qreg.set_measurement(qubit,false);
        return 0;
    }
    void dump()
    {
        println(" [-] prepx(qubit=" << qubit << ")");
    }
    std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; }
    std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; }
    std::vector<uint64_t> target_qubits() { return qubits(); }
    gate_type_t type() { return __prepx_gate__; }
};

/**
 * prepy : prepare the Y-basis state as prepx followed by S.
 */
class prepy : public gate
{
private:
    uint64_t qubit;
    prepx px;        // reuses the X preparation
    phase_shift s;   // S gate
public:
    prepy(uint64_t qubit) : qubit(qubit), px(qubit), s(qubit)
    {
    }
    int64_t apply(qu_register& qreg)
    {
        px.apply(qreg);
        s.apply(qreg);
        qreg.set_measurement(qubit,false);
        return 0;
    }
    void dump()
    {
        println(" [-] prepy(qubit=" << qubit << ")");
    }
    std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; }
    std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; }
    std::vector<uint64_t> target_qubits() { return qubits(); }
    gate_type_t type() { return __prepy_gate__; }
};

// Dispatch table keyed by the value of a set of classical control bits:
// when applied, reads the bits, forms their binary value and applies the
// gate registered for that value (if any).
class lookup_gate_table : public gate
{
private:
    std::vector<uint64_t> ctrl_bits;
    std::map<uint64_t,gate *> gates;   // condition value -> gate (raw, not owned)
public:
    lookup_gate_table(uint64_t b0)
    {
        ctrl_bits.push_back(b0);
    }
    lookup_gate_table(uint64_t b0, uint64_t b1)
    {
        ctrl_bits.push_back(b0);
        ctrl_bits.push_back(b1);
    }
    lookup_gate_table(uint64_t b0, uint64_t b1, uint64_t b2)
    {
        ctrl_bits.push_back(b0);
        ctrl_bits.push_back(b1);
        ctrl_bits.push_back(b2);
    }
    lookup_gate_table(std::vector<uint64_t> ctrl_bits) : ctrl_bits(ctrl_bits)
    {
    }
    // Register g for the given condition value (must fit in the bit count).
    void add_gate(uint64_t cond, gate * g)
    {
        assert(cond < (1<< ctrl_bits.size()));
        gates[cond] = g;
    }
    int64_t apply(qu_register& qreg)
    {
        // fold the classical bits (MSB first) into the lookup key
        uint64_t k = 0;
        for (uint64_t i=0; i<ctrl_bits.size(); i++)
        {
            //println(qreg.get_binary(i));
            if (qreg.test(ctrl_bits[i]))
                k = k * 2 + 1;
            else
                k *= 2;
        }
        // println("[+] lookup table : cond = " << k);
        std::map<uint64_t,gate*>::iterator it = gates.find(k);
        if (it != gates.end())
            (*it).second->apply(qreg);
        return 0;
    }
    std::vector<uint64_t> qubits()
    {
        std::vector<uint64_t> r;
        // to do : union over all registered gates
        std::map<uint64_t,gate *>::iterator ig;
        for (ig=gates.begin(); ig!=gates.end(); ++ig)
        {
            std::vector<uint64_t> ri = ig->second->qubits();
            r.insert(r.begin(), ri.begin(), ri.end());
        }
        return r;
    }
    std::vector<uint64_t> control_qubits()
    {
        std::vector<uint64_t> r;
        // to do
        std::map<uint64_t,gate *>::iterator ig;
        for (ig=gates.begin(); ig!=gates.end(); ++ig)
        {
            std::vector<uint64_t> ri = ig->second->control_qubits();
            if (ri.size())
                r.insert(r.begin(), ri.begin(), ri.end());
        }
        return r;
    }
    std::vector<uint64_t> target_qubits()
    {
        std::vector<uint64_t> r;
        // to do
        std::map<uint64_t,gate *>::iterator ig;
        for (ig=gates.begin(); ig!=gates.end(); ++ig)
        {
            std::vector<uint64_t> ri = ig->second->target_qubits();
            if (ri.size())
                r.insert(r.begin(), ri.begin(), ri.end());
        }
        return r;
    }
    void dump()
    {
        println(" [-] lookup gate table : ");
    }
    gate_type_t type() { return __lookup_table__; }
};

/**
 * \brief display : debug utility
 *   display intermediate quantum states of a
 *   quantum register whithin a circuit.
 */
class display : public gate
{
private:
    bool only_binary;   // dump only the classical/binary part of the state
public:
    display(bool only_binary=false) : only_binary(only_binary)
    {
    }
    int64_t apply(qu_register& qreg)
    {
        qreg.dump(only_binary);
        return 0;
    }
    void dump()
    {
        println(" [-] display(only_binary=" << only_binary << ")");
    }
    std::vector<uint64_t> qubits() { std::vector<uint64_t> r; return r; }
    std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; }
    std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; return r; }
    gate_type_t type()
    {
        if (only_binary)
            return __display_binary__;
        else
            return __display__;
    }
};

/**
 * parallel gates : a group of gates applied back-to-back as one step
 * (the qubit lists are the concatenation of the members' lists).
 */
class parallel_gates : public gate
{
public:
    parallel_gates()
    {
    }
    int64_t apply(qu_register& qreg)
    {
        for (uint64_t i=0; i<gates.size(); i++)
            gates[i]->apply(qreg);
        return 0;
    }
    // Append a gate; returns the new member count.
    uint64_t add(gate * g)
    {
        gates.push_back(g);
        return gates.size();
    }
    std::vector<gate *> get_gates() { return gates; }
    std::vector<uint64_t> qubits()
    {
        std::vector<uint64_t> r;
        for (uint64_t i=0; i<gates.size(); i++)
        {
            std::vector<uint64_t> q = gates[i]->qubits();
            r.insert(r.end(),q.begin(),q.end());
        }
        return r;
    }
    std::vector<uint64_t> control_qubits()
    {
        std::vector<uint64_t> r;
        for (uint64_t i=0; i<gates.size(); i++)
        {
            std::vector<uint64_t> q = gates[i]->control_qubits();
            r.insert(r.end(),q.begin(),q.end());
        }
        return r;
    }
    std::vector<uint64_t> target_qubits()
    {
        std::vector<uint64_t> r;
        for (uint64_t i=0; i<gates.size(); i++)
        {
            std::vector<uint64_t> q = gates[i]->target_qubits();
            r.insert(r.end(),q.begin(),q.end());
        }
        return r;
    }
    void dump()
    {
        println(" [-] parallel_gates (" << gates.size() << " gates) : ");
        for (uint64_t i=0; i<gates.size(); i++)
            gates[i]->dump();
    }
    gate_type_t type() { return __parallel_gate__; }
private:
    std::vector<gate *> gates; // list of the parallel gates
};

/**
 * prepare the qubits into an arbitrary quantum state
 */
class prepare : public gate
{
private:
    quantum_state_t * state;   // (basis state -> amplitude) map to load
public:
    prepare(quantum_state_t * state) : state(state)
    {
    }
    int64_t
apply(qu_register& qreg) { qreg.reset(); cvector_t& q = qreg.get_data(); double norm = 0; for (quantum_state_t::iterator i=state->begin(); i != state->end(); ++i) { basis_state_t bs = (*i).first; complex_t c = (*i).second; // println("bs=" << bs << ", a=" << c); q[bs] = c; norm += c.norm(); //std::norm(c); } if (std::fabs(norm-1) > QUBIT_ERROR_THRESHOLD) { println("[!] warning : the loaded quantum state is not normalized (norm = " << norm << ") !"); println("[!] renormalizing the quantum state..."); qreg.normalize(); println("[!] quantum state renormalized successfully."); } for (size_t qi=0; qi<qreg.size(); ++qi) { qreg.set_measurement_prediction(qi,__state_unknown__); //qreg.set_binary(qi,__state_unknown__); } return 0; } void dump() { println(" [-] prepare (quantum_state=" << state << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; // this is a dirty hack, itshould be fixed later (unknown qubit number !) for (int64_t i=0; i<MAX_QB_N; ++i) r.push_back(i); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { return qubits(); } gate_type_t type() { return __prepare_gate__; } }; /** * \brief print : debug utility * print arbitrary string */ class print_str : public gate { private: std::string str; public: print_str(std::string& s) : str(s) { } int64_t apply(qu_register& qreg) { println(str); return 0; } void dump() { println(" print " << str << "\""); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; return r; } gate_type_t type() { return __print_str__; } }; } #endif // QX_GATE_H
binary-search.h
/* * Copyright 2018-2021 Kyle Berney * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef BINARY_SEARCH_H #define BINARY_SEARCH_H #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <math.h> #include <omp.h> #include <time.h> #include "common.h" //Performs binary search on the sorted array A of size n template<typename TYPE> uint64_t binarySearch(TYPE *A, uint64_t n, TYPE query) { uint64_t mid; uint64_t left = 0; uint64_t right = n - 1; while (left <= right) { mid = (left + right + 1) / 2; //left + ((right - left + 1) / 2); if (query == A[mid]) { return mid; } else if (query > A[mid]) { left = mid + 1; } else { right = mid - 1; } } return right; //query not found } //Performs all of the queries given in the array queries //index in A of the queried items are saved in the answers array template<typename TYPE> void searchAll(TYPE *A, uint64_t n, TYPE *queries, uint64_t *answers, uint64_t numQueries, uint32_t p) { #pragma omp parallel for shared(A, n, queries, answers, numQueries, p) schedule(guided) num_threads(p) for (uint64_t i = 0; i < numQueries; ++i) { answers[i] = binarySearch<TYPE>(A, n, queries[i]); } } //Generates numQueries random queries and returns the milliseconds needed to perform the queries template<typename TYPE> double timeQuery(TYPE *A, uint64_t n, uint64_t numQueries, uint32_t p) { struct timespec start, end; TYPE *queries = createRandomQueries<TYPE>(A, n, numQueries); //array to store random queries to perform uint64_t *answers = (uint64_t 
*)malloc(numQueries * sizeof(uint64_t)); //array to store the answers (i.e., index of the queried item) clock_gettime(CLOCK_MONOTONIC, &start); searchAll<TYPE>(A, n, queries, answers, numQueries, p); clock_gettime(CLOCK_MONOTONIC, &end); double ms = ((end.tv_sec*1000000000. + end.tv_nsec) - (start.tv_sec*1000000000. + start.tv_nsec)) / 1000000.; //millisecond #ifdef VERIFY bool correct = true; for (uint64_t i = 0; i < numQueries; i++) { if (answers[i] == n || A[answers[i]] != queries[i]) { #ifdef DEBUG printf("query = %lu; found = %lu\n", queries[i], A[answers[i]]); #endif correct = false; } } if (correct == false) printf("Searches failed!\n"); else printf("Searches succeeded!\n"); #endif free(queries); free(answers); return ms; } #endif
Pusher.h
#pragma once
#include "Constants.h"
#include "Species.h"
#include "FieldValue.h"

#include <array>
#include <vector>
#include <iostream>

namespace pfc
{
    // Base interface for particle pushers. Both overloads are deliberate
    // no-ops; concrete pushers shadow them (note: non-virtual, so dispatch is
    // static — callers must use the concrete pusher type).
    class ParticlePusher
    {
    public:
        // Advance a single particle in the given field sample over one time step.
        template<class T_Particle>
        inline void operator()(T_Particle* particle, ValueField& field, FP timeStep) {};

        // Advance a whole particle array; fields[i] is the field sampled at particle i.
        template<class T_ParticleArray>
        inline void operator()(T_ParticleArray* particleArray, std::vector<ValueField>& fields, FP timeStep) { };
    };

    // Boris scheme: half electric kick, magnetic rotation, half electric kick,
    // then a position update with the post-push velocity.
    class BorisPusher : public ParticlePusher
    {
    public:
        template<class T_Particle>
        inline void operator()(T_Particle* particle, ValueField& field, FP timeStep)
        {
            FP3 e = field.getE();
            FP3 b = field.getB();
            // Half-step impulse coefficient q*dt/(2*m*c); the 1/c factor
            // suggests CGS-Gaussian units and p normalized so that
            // sqrt(1 + p^2) is gamma — TODO confirm against Particle.
            FP eCoeff = timeStep * particle->getCharge() / (2 * particle->getMass() * Constants<FP>::lightVelocity());
            FP3 eMomentum = e * eCoeff;
            // First electric half-kick.
            FP3 um = particle->getP() + eMomentum;
            // Rotation vector; the sqrt term plays the role of gamma(um).
            FP3 t = b * eCoeff / sqrt((FP)1 + um.norm2());
            FP3 uprime = um + cross(um, t);
            FP3 s = t * (FP)2 / ((FP)1 + t.norm2());
            // Magnetic rotation completed by cross(uprime, s), then the
            // second electric half-kick (eMomentum added once more).
            particle->setP(eMomentum + um + cross(uprime, s));
            particle->setPosition(particle->getPosition() + timeStep * particle->getVelocity());
        }

        template<class T_ParticleArray>
        inline void operator()(T_ParticleArray* particleArray, std::vector<ValueField>& fields, FP timeStep)
        {
            typedef typename T_ParticleArray::ParticleProxyType ParticleProxyType;
            // Each iteration touches only particle i and fields[i], so the
            // loop is data-parallel.
            #pragma omp parallel for simd
            for (int i = 0; i < particleArray->size(); i++)
            {
                ParticleProxyType particle = (*particleArray)[i];
                operator()(&particle, fields[i], timeStep);
            }
        };
    };

    // Radiation-reaction momentum correction, applied on top of a pusher.
    // The formula matches a Landau-Lifshitz-type expression — confirm the
    // exact reference before relying on this characterization.
    class RadiationReaction : public ParticlePusher
    {
    public:
        template<class T_Particle>
        inline void operator()(T_Particle* particle, ValueField& field, FP timeStep)
        {
            // Radiation losses are applied only to electrons and positrons.
            if (particle->getType() == Electron || particle->getType() == Positron)
            {
                FP3 e = field.getE();
                FP3 b = field.getB();
                FP3 v = particle->getVelocity();
                FP gamma = particle->getGamma();
                FP c = Constants<FP>::lightVelocity();
                FP electronCharge = Constants<FP>::electronCharge();
                FP electronMass = Constants<FP>::electronMass();
                // dp = dt * (2/3) * (e^2/(m c^2))^2 * [ ... ].
                // VP/SP are the project's vector/scalar products; sqr of an
                // FP3 presumably yields its squared norm — TODO confirm.
                FP3 dp = timeStep * (2.0 / 3.0) *
                    sqr(sqr(electronCharge) / (electronMass * sqr(c))) *
                    (VP(e, b) + (1 / c) * (VP(b, VP(b, v)) + SP(v, e) * e) -
                     (1 / c) * sqr(gamma) * (sqr(e + (1 / c) * VP(v, b)) - sqr(SP(e, v) / c)) * v);
                // NOTE(review): BorisPusher uses setP/getP while this class
                // uses setMomentum/getMomentum — confirm both accessors refer
                // to the same quantity.
                particle->setMomentum(particle->getMomentum() + dp);
            }
        }

        template<class T_ParticleArray>
        inline void operator()(T_ParticleArray* particleArray, std::vector<ValueField>& fields, FP timeStep)
        {
            typedef typename T_ParticleArray::ParticleProxyType ParticleProxyType;
            // Independent per-particle updates: safe to parallelize.
            #pragma omp parallel for simd
            for (int i = 0; i < particleArray->size(); i++)
            {
                ParticleProxyType particle = (*particleArray)[i];
                operator()(&particle, fields[i], timeStep);
            }
        };
    };
}
Example_array_shaping.1.c
/* * @@name: array_shaping.1.c * @@type: C * @@compilable: yes * @@linkable: no * @@expect: success * @@version: omp_5.0 */ #pragma omp declare target int do_work(double *a, int nx, int ny); int other_work(double *a, int nx, int ny); #pragma omp end declare target void exch_data(double *a, int nx, int ny); void array_shaping(double *a, int nx, int ny) { // map data to device and do work #pragma omp target data map(a[0:nx*(ny+2)]) { // do work on the device #pragma omp target // map(a[0:nx*(ny+2)]) is optional here do_work(a, nx, ny); // update boundary points (two columns of 2D array) on the host // pointer is shaped to 2D array using the shape-operator #pragma omp target update from( (([nx][ny+2])a)[0:nx][1], \ (([nx][ny+2])a)[0:nx][ny] ) // exchange ghost points with neighbors exch_data(a, nx, ny); // update ghost points (two columns of 2D array) on the device // pointer is shaped to 2D array using the shape-operator #pragma omp target update to( (([nx][ny+2])a)[0:nx][0], \ (([nx][ny+2])a)[0:nx][ny+1] ) // perform other work on the device #pragma omp target // map(a[0:nx*(ny+2)]) is optional here other_work(a, nx, ny); } }
AdPointDataReaders.h
/*
   Ralf Kaehler

   14 December 2016

   Copyright (c) 2016, The Board of Trustees of the Leland Stanford Junior
   University, through SLAC National Accelerator Laboratory (subject to receipt
   of any required approvals from the U.S. Dept. of Energy). All rights reserved.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:

   (1) Redistributions of source code must retain the above copyright notice,
   this list of conditions and the following disclaimer.

   (2) Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

   (3) Neither the name of the Leland Stanford Junior University, SLAC National
   Accelerator Laboratory, U.S. Dept. of Energy nor the names of its
   contributors may be used to endorse or promote products derived from this
   software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER, THE UNITED STATES
   GOVERNMENT, OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
   OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
   OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
   ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You are under no obligation whatsoever to provide any bug fixes, patches, or
   upgrades to the features, functionality or performance of the source code
   ("Enhancements") to anyone; however, if you choose to make your Enhancements
   available either publicly, or directly to SLAC National Accelerator
   Laboratory, without imposing a separate written license agreement for such
   Enhancements, then you hereby grant the following license: a non-exclusive,
   royalty-free perpetual license to install, use, modify, prepare derivative
   works, incorporate into other computer software, distribute, and sublicense
   such Enhancements or derivative works thereof, in binary and source code
   form.
*/

#ifndef _AD_POINT_DATA_READERS_
#define _AD_POINT_DATA_READERS_

#include <cfloat>
#include <string>

#include "AdMeasureMPIWallClockTime.h"
#include "AdUtils.h"
#include "AdAssert.h"
#include "AdLagrangianRedistribution.h"

namespace AdaptiveMassDeposit {

    // Combine per-rank bounding boxes into a global one via MPI_Allreduce
    // (component-wise MIN on the lower corner, MAX on the upper corner).
    // When use_local_bbox is false, this rank contributes neutral elements
    // so the result is determined by the other ranks only.
    static inline AABBox get_global_bbox_mpi( const bool use_local_bbox, const AABBox& local_bbox ) {
        float loc_min[3] = {local_bbox.min[0],local_bbox.min[1],local_bbox.min[2]};
        float loc_max[3] = {local_bbox.max[0],local_bbox.max[1],local_bbox.max[2]};
        float global_bbox_min[3];
        float global_bbox_max[3];
        if ( use_local_bbox==false ) {
            loc_min[0] = loc_min[1] = loc_min[2] = FLT_MAX;
            // NOTE(review): FLT_MIN is the smallest POSITIVE float, not the
            // most negative value; -FLT_MAX is presumably intended here. This
            // is harmless as long as at least one rank contributes a real
            // bbox with positive coordinates — confirm.
            loc_max[0] = loc_max[1] = loc_max[2] = FLT_MIN;
        }
        CHECK_MPI_ERROR( MPI_Allreduce(loc_min, global_bbox_min, 3, MPI_FLOAT, MPI_MIN, MPI_COMM_WORLD) );
        CHECK_MPI_ERROR( MPI_Allreduce(loc_max, global_bbox_max, 3, MPI_FLOAT, MPI_MAX, MPI_COMM_WORLD) );
        AABBox res;
        res.min = PosVec(global_bbox_min[0],global_bbox_min[1],global_bbox_min[2]);
        res.max = PosVec(global_bbox_max[0],global_bbox_max[1],global_bbox_max[2]);
        return res;
    }

    // Compute the axis-aligned bounding box of all particles held locally
    // (across all per-destination-rank chunks). Throws AdRuntimeException
    // when this rank holds no particles at all.
    static AABBox get_local_bbox( const std::vector<std::shared_ptr<particles_with_ids_chunk> >& particles_by_proc ) {
        AABBox box;
        bool has_local_particles = false;
        // first initialize bbox with the first particle found in any chunk
        for ( size_t p=0; p<particles_by_proc.size(); ++p ) {
            if ( particles_by_proc[p]->get_num()>0 ) {
                pos_t pos[3];
                particles_by_proc[p]->get_position( 0, pos );
                box.min = PosVec( pos[0],pos[1],pos[2] );
                box.max = PosVec( pos[0],pos[1],pos[2] );
                has_local_particles = true;
                break;
            }
        }
        if ( has_local_particles==false ) {
            throw AdRuntimeException("WARNING: get_local_bbox(): have no local particles.",false);
        }
        // to-do: try to optimize this using OpenMP
        for ( size_t p=0; p<particles_by_proc.size(); ++p ) {
            // NOTE(review): despite the "_ptr" names these are VALUE copies
            // of the dereferenced position vectors; a const& would avoid
            // copying each coordinate array — confirm get_positions() returns
            // a pointer to persistent storage.
            const Particles::pos_t_vec pos_x_ptr = *particles_by_proc[p]->get_positions( 0 );
            const Particles::pos_t_vec pos_y_ptr = *particles_by_proc[p]->get_positions( 1 );
            const Particles::pos_t_vec pos_z_ptr = *particles_by_proc[p]->get_positions( 2 );
            for ( size_t i=0; i<pos_x_ptr.size(); ++i ) {
                box.min = PosVec( std::min(pos_x_ptr[i],box.min[0]), std::min(pos_y_ptr[i],box.min[1]), std::min(pos_z_ptr[i],box.min[2]) );
                box.max = PosVec( std::max(pos_x_ptr[i],box.max[0]), std::max(pos_y_ptr[i],box.max[1]), std::max(pos_z_ptr[i],box.max[2]) );
            }
        }
        return box;
    }

    // Global bbox over all ranks' particles. A rank without local particles
    // (get_local_bbox throws) still participates in the collective calls but
    // contributes neutral values.
    static AABBox get_global_bbox_mpi( const std::vector< std::shared_ptr<particles_with_ids_chunk> >& particles_by_proc ) {
        try {
            const AABBox local_bbox = get_local_bbox( particles_by_proc );
            return get_global_bbox_mpi( true, local_bbox );
        }
        catch( std::exception& ex ) {
            AABBox local_bbox;
            std::cout << "WARNING: get_global_bbox_mpi(): Local process does not own any particles yet. Computing global bbox without it." << std::endl;
            return get_global_bbox_mpi( false, local_bbox );
        }
    }

    // Unwrap particles across periodic boundaries: a particle whose Lagrangian
    // grid index lies in one half of the grid but whose position lies in the
    // far quarter of the box on the opposite side is shifted by one box length
    // so positions vary smoothly within each patch.
    template <class ID_MAPPING_ORDER>
    static void apply_boundary_corrections(const AABBox& bbox, const AvVec3i& grid_dims, const ID_MAPPING_ORDER& id_functor, particles_with_ids_chunk& particles ) {
        const pos_t box_ext[3] = { bbox.get_extension(0), bbox.get_extension(1), bbox.get_extension(2) };
        // thresholds at 1/4 and 3/4 of the box extension per axis
        const pos_t box_ext_h1[3] = { bbox.min[0]+box_ext[0]/pos_t(4.0), bbox.min[1]+box_ext[1]/pos_t(4.0), bbox.min[2]+box_ext[2]/pos_t(4.0) };
        const pos_t box_ext_h2[3] = { bbox.min[0]+pos_t(3./4.)*box_ext[0], bbox.min[1]+pos_t(3./4.)*box_ext[1], bbox.min[2]+pos_t(3./4.)*box_ext[2] };
        // half the grid dimension per axis, used to decide which side the
        // particle's Lagrangian index is on
        const pos_t td_h[3] = { pos_t(grid_dims[0])/pos_t(2.0), pos_t(grid_dims[1])/pos_t(2.0), pos_t(grid_dims[2])/pos_t(2.0) };
        Particles::pos_t_vec& px = *particles.get_positions(0);
        Particles::pos_t_vec& py = *particles.get_positions(1);
        Particles::pos_t_vec& pz = *particles.get_positions(2);
        Particles::ids_t_vec& ids = *particles.get_ids();
        // to-do: should/could parallelize this using OpenMP
        for ( size_t p=0; p<particles.get_num(); ++p ) {
            // use id_functor to translate linear index into 3D index
            const AvVec3i idx = id_functor.map_linear_to_3D_idx( ids[p], grid_dims );
            pos_t pos[3] = { px[p], py[p], pz[p] };
            //const AvVislib::AvVec3D<T> pos(positions[idx],positions[idx+1],positions[idx+2]);
            bool need_update = false;
            for ( int i=0; i<3; ++i ) {
                if ( idx[i]>td_h[i] && pos[i]<box_ext_h1[i] ) {
                    pos[i] += box_ext[i];   // wrapped to the low side: shift up
                    need_update = true;
                }
                else if ( idx[i]<td_h[i] && pos[i]>box_ext_h2[i]) {
                    pos[i] -= box_ext[i];   // wrapped to the high side: shift down
                    need_update = true;
                }
            }
            if ( need_update ) {
                px[p] = pos[0];
                py[p] = pos[1];
                pz[p] = pos[2];
            }
        } // end loop over p
    }

    // Abstract base for point-data readers: load_data() validates arguments,
    // allocates one particle chunk per destination rank, and delegates the
    // actual I/O to the subclass hook load_data_().
    class AdPointDataReader {
    public:
        // 3D layout of the MPI ranks, as chosen by the reader
        inline AvVec3i get_rank_dims() const { return procs_dims_; }
        // global Lagrangian particle-grid dimensions (after striding)
        inline AvVec3i get_global_particle_dims() const { return global_particle_dims_; }
        // particles binned by the rank they should be redistributed to
        inline std::vector< std::shared_ptr<particles_with_ids_chunk> >& get_particles_per_rank( ) { return particles_per_proc_; }

        inline size_t
load_data(const std::string& filename, const int my_rank, const int num_procs, const int stride ) {
            // validate arguments before touching the file system
            if ( filename.empty() ) {
                throw AdRuntimeException( "ERROR: AdPointDataReader::load_data(): missing filename." );
            }
            if (my_rank>=num_procs || my_rank<0 || num_procs<1 ) {
                throw AdRuntimeException( "ERROR: AdPointDataReader::load_data(): invalid MPI rank parameters." );
            }
            if ( stride<1 ) {
                throw AdRuntimeException( "ERROR: AdPointDataReader::load_data(): invalid 'stride' parameters." );
            }

            // allocate bins to store the data, one per destination rank
            particles_per_proc_.resize( num_procs );
            for ( size_t i=0; i<particles_per_proc_.size(); ++i ) {
                particles_per_proc_[i] = std::shared_ptr<particles_with_ids_chunk>(new particles_with_ids_chunk);
            }

            // subclass hook performs the actual read and fills the outputs
            const size_t num_particles = load_data_(filename, my_rank, num_procs, stride, procs_dims_, global_particle_dims_, particles_per_proc_ );

            if ( num_particles == 0 ) {
                std::cerr << "WARNING: AdPointDataReaders()::load_data(): call returned 0 particles on MPI rank == " << my_rank << std::endl;
            }

            AD_ASSERT( checkInvariant_( my_rank, num_procs ), "ERROR: AdPointDataReaders(): checkInvariants_() failed." );

            return num_particles;
        }

    protected:
        // Reader-specific I/O: must set procs_dims / global_particle_dims and
        // fill particles_per_proc; returns the number of particles read locally.
        virtual size_t load_data_(const std::string& filename,
                                  const int my_rank,
                                  const int num_procs,
                                  const int stride,
                                  AvVec3i& procs_dims,
                                  AvVec3i& global_particle_dims,
                                  std::vector< std::shared_ptr<particles_with_ids_chunk> >& particles_per_proc ) = 0;

    private:
        // Debug-build sanity check: verifies that every particle landed in the
        // bin of the rank that owns its Lagrangian patch. Always returns true
        // (failures fire asserts instead).
        bool checkInvariant_( const int my_rank, const int num_procs ) const {
            assert( procs_dims_[0]*procs_dims_[1]*procs_dims_[2] == num_procs );
            size_t particle_count = 0;
            for ( int p=0; p<particles_per_proc_.size(); ++p ) {
                particle_count += particles_per_proc_[p]->get_num();
                for ( int i=0; i<particles_per_proc_[p]->get_num(); ++i ) {
                    // we are assuming column-major array order in the remainder of the code, so readers have to remap if necessary
                    const size_t lin_id = particles_per_proc_[p]->get_id(i);
                    const AvVec3i lid = AdaptiveMassDeposit::ColumnMajorOrder::map_linear_to_3D_idx( lin_id, global_particle_dims_ );
                    AvVec3i tmp_block_offset;
                    AvVec3i tmp_block_dims;
                    // check if point is really assigned to the correct process
                    AdaptiveMassDeposit::LagrangianRedistribution::get_patch_info<ColumnMajorOrder>(num_procs, p, procs_dims_, global_particle_dims_, tmp_block_offset, tmp_block_dims );
                    AD_ASSERT_C( lid[0]>=tmp_block_offset[0] && lid[0]<(tmp_block_offset[0]+tmp_block_dims[0]), { AvVec3i(lid[0],lid[1],lid[2]).print(); tmp_block_offset.print(); tmp_block_dims.print();} );
                    assert( lid[1]>=tmp_block_offset[1] && lid[1]<(tmp_block_offset[1]+tmp_block_dims[1]) );
                    assert( lid[2]>=tmp_block_offset[2] && lid[2]<(tmp_block_offset[2]+tmp_block_dims[2]) );
                }
            }
            return true;
        }

    private:
        AvVec3i procs_dims_;            // 3D MPI rank layout
        AvVec3i global_particle_dims_;  // global particle-grid dims (post-stride)
        std::vector< std::shared_ptr<particles_with_ids_chunk> > particles_per_proc_;  // one bin per destination rank
    };

    // Abstract factory for readers.
    // NOTE(review): polymorphic base without a virtual destructor — deleting a
    // concrete reader factory through this base would be undefined behavior;
    // consider adding "virtual ~AdPointDataReadersFactory() = default;".
    class AdPointDataReadersFactory {
    public:
        virtual std::shared_ptr<AdPointDataReader> create_reader() = 0;
    };

    // Concrete factory template: default-constructs the given reader class.
    template< class ReaderClass >
    class AdPointDataReadersCreator : public AdPointDataReadersFactory {
    public:
        virtual std::shared_ptr<AdPointDataReader> create_reader() { return
std::shared_ptr<ReaderClass> ( new ReaderClass() ); } }; class AdDarkSkyDataReader : public AdPointDataReader { protected: static inline void morton_2_grid( const int64_t morton, int64_t grid_id[3]) { static const int64_t mask = (1LL<<48)-1; int64_t key = morton & mask; int level = 0; grid_id[0] = 0; grid_id[1] = 0; grid_id[2] = 0; while ( key>0 ) { grid_id[2] += (key & 1) << level; key = key >> 1; grid_id[1] += (key & 1) << level; key = key >> 1; grid_id[0] += (key & 1) << level; key = key >> 1; level += 1; } std::swap( grid_id[0], grid_id[2] ); } static inline uint64_t grid_2_morton( const unsigned int x, const unsigned int y, const unsigned int z) { uint64_t morton = 0; static const uint64_t mb = (sizeof(uint64_t)*CHAR_BIT)/3; for ( uint64_t i=0; i<mb; ++i ) { const uint64_t i2 = 2*i; const uint64_t is = uint64_t(1) << i; morton |= ( (x&is)<<i2) | ( (y&is)<<(i2+1)) | ( (z&is)<<(i2+2)) ; } return morton; } static size_t get_header_size( const std::string& filename ) { std::filebuf fb; if ( !fb.open (filename.c_str(),std::ios::in|std::ios_base::binary) ) { throw AdRuntimeException( "ERROR: get_header_size(): failed to open file: " + filename ); } std::istream infile(&fb); char buffer[1024]; size_t security_c = 0; int sha1_chunks = -1; bool found_eoh = false; while ( infile.good() && security_c++<100000 ) { infile.getline (buffer, 1024 ); std::string line(buffer); if ( line.compare(0, 17, "int sha1_chunks =")==0 ) { std::string tmp = line.substr(line.find("=")+1, line.find(";")-line.find("=")-1); sha1_chunks = atoi(tmp.c_str()); } if ( line.compare(0, 9, "# SDF-EOH") == 0 ) { found_eoh = true; break; } } /* struct { unsigned int sha1_len; unsigned char sha1[20]; }[16]; */ // header size == current file pos + offet for sha1 const size_t header_size = size_t(infile.tellg()) + sha1_chunks*(sizeof(int)+20); fb.close(); if ( found_eoh==false || sha1_chunks<0 || security_c++>=100000 ) { throw AdRuntimeException("Failed to parse header of file: " + filename ); } return 
header_size; } virtual size_t load_data_(const std::string& filename, const int my_rank, const int num_procs, const int stride, AvVec3i& procs_dims, AvVec3i& global_particle_dims, std::vector< std::shared_ptr<particles_with_ids_chunk> >& particles_per_rank ) { #if 1 // first get header size of SDF file unsigned long header_size = 0; if ( my_rank== 0 ) { header_size = get_header_size(filename); } CHECK_MPI_ERROR( MPI_Bcast( &header_size, 1, MPI_LONG_INT, 0, MPI_COMM_WORLD) ); if ( header_size==0 ) { throw AdRuntimeException( "ERROR: AdDarkSkyDataReader::load_data_(): invalid header size" ); } MPI_File file_handle; CHECK_MPI_ERROR( MPI_File_open( MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handle) ); //size_t file_size = get_file_size(filename); MPI_Offset file_size = 0; CHECK_MPI_ERROR( MPI_File_get_size( file_handle, &file_size ) ); /* struct darksky_particle { float x, y, z; float vx, vy, vz; int64_t id; } ds_part_buffer; */ static const size_t bytes_per_particle = 6*sizeof(float)+sizeof(int64_t); static const int ids_offset = 6*sizeof(float); // hack: assuming that number of particle structs is divisible by number of processors const uint64_t total_num_particles = (file_size-header_size)/(bytes_per_particle); if ( (file_size-header_size)%bytes_per_particle!=0 ) { throw AdRuntimeException("ERROR: AdDarkSkyDataReader::load_data_(): inconsistent file size "); } const size_t linear_particle_dims = rint( cbrt(total_num_particles) ); if ( linear_particle_dims*linear_particle_dims*linear_particle_dims!= total_num_particles ) { throw AdRuntimeException("ERROR: AdDarkSkyDataReader::load_data_(): lagrangian grid not cubical "); } else { std::cout << "INFO: AdDarkSkyDataReader::load_data_(): linear lagrangian grid dimension: " << linear_particle_dims << std::endl; } const size_t particles_per_proc = ceil( float(total_num_particles)/num_procs); const unsigned int src_dims[3] = { static_cast<unsigned int>(linear_particle_dims/stride), 
static_cast<unsigned int>(linear_particle_dims/stride), static_cast<unsigned int>(linear_particle_dims/stride) }; unsigned int dst_dims[3]; ProcessLayout::find_best_match( num_procs, src_dims, dst_dims); procs_dims = AvVec3i( dst_dims[0],dst_dims[1],dst_dims[2] ); const AvVec3i process_3D_idx = ColumnMajorOrder::map_linear_to_3D_idx( uint64_t(my_rank), procs_dims );; int effective_rank = -1; const bool power_of_two = (procs_dims[0]>0) && ( !( procs_dims[0] & (procs_dims[0]-1)) ); if ( power_of_two && procs_dims[1] == procs_dims[0] && procs_dims[2]==procs_dims[0] ) { for ( int64_t rank=0; rank<num_procs; ++rank ) { int64_t lid[3] = {0,0,0}; morton_2_grid(int64_t( rank ),lid); AD_ASSERT( grid_2_morton(lid[0],lid[1],lid[2])==uint64_t(rank), "" ); if ( AvVec3i(lid[0],lid[1],lid[2])==process_3D_idx ) { effective_rank = rank; break; } } } else { effective_rank = my_rank; } if ( effective_rank<0 || effective_rank>=num_procs ) { throw AdRuntimeException("ERROR: AdDarkSkyDataReader::load_data_(): inconsistent effective process id. "); } global_particle_dims = AvVec3i(linear_particle_dims,linear_particle_dims,linear_particle_dims); if ( global_particle_dims[0]%stride!=0 || global_particle_dims[1]%stride!=0 || global_particle_dims[2]%stride!=0 ) { throw AdRuntimeException("ERROR: AdDarkSkyDataReader::load_data_(): invalid stride for data dimensions "); } global_particle_dims[0] /= stride; global_particle_dims[1] /= stride; global_particle_dims[2] /= stride; AD_VERBOSE( 1, { std::cout << "INFO: AdDarkSkyDataReader::load_data_(): (MPI_rank==" << my_rank << "): Starting to load " << particles_per_proc << " of the total " << total_num_particles << " particles. 
" << std::endl; } ); // compute number of cells per processor (upper rows could contain fewer particles) const AvVec3i cells_per_proc((global_particle_dims[0]-1 + (procs_dims[0]-1))/procs_dims[0], (global_particle_dims[1]-1 + (procs_dims[1]-1))/procs_dims[1], (global_particle_dims[2]-1 + (procs_dims[2]-1))/procs_dims[2] ); const size_t start = effective_rank*particles_per_proc; //const size_t start = my_rank*particles_per_proc; // skip to start of our piece of the cake CHECK_MPI_ERROR( MPI_File_set_view( file_handle, header_size+start*bytes_per_particle, MPI_CHAR, MPI_CHAR, "native", MPI_INFO_NULL ) ); static const size_t particles_per_pass = 50000000; static const size_t buffer_size = particles_per_pass*( bytes_per_particle ); std::vector<char> buffer(buffer_size,0); size_t c = start; size_t loc_c = 0; bool file_good = true; size_t num_effective_particles = 0; while ( file_good && loc_c<particles_per_proc && c<total_num_particles ) { const size_t new_size = std::min(buffer.size()/bytes_per_particle, std::min( particles_per_proc-loc_c,size_t(total_num_particles-c)) ); buffer.resize( new_size*bytes_per_particle ); if ( buffer.empty() ) { std::cout << "INFO: AdDarkSkyDataReader::load_data_(): BREAK. 
" << std::endl; break; } MPI_Status status; if ( MPI_File_read( file_handle, &buffer[0], buffer.size(), MPI_CHAR, &status ) != MPI_SUCCESS ) { std::cout << "WARNING: AdDarkSkyDataReader::load_data_(): file_handle = " << file_handle << " buffer.size()=" << buffer.size() << std::endl; throw AdRuntimeException("ERROR: AdDarkSkyDataReader::load_data_(): Failed to read all particles for lagrangian input grid."); } AD_ASSERT( buffer.size()%bytes_per_particle==0, "ERROR: AdDarkSkyDataReader::load_data_(): invalid number of read bytes" ); const size_t num_read_particles = buffer.size()/bytes_per_particle; #pragma omp parallel for reduction(+:loc_c) reduction(+:c) //schedule(dynamic) for ( size_t i=0; i<num_read_particles; ++i ) { const size_t bytes_counter = i*bytes_per_particle; const float* float_ptr = reinterpret_cast<const float*>(&buffer.front()+bytes_counter); const float pos[3] = { float_ptr[0],float_ptr[1], float_ptr[2] }; const int64_t id = *(reinterpret_cast<int64_t*>( &buffer.front()+bytes_counter+ids_offset )); int64_t lid[3]; morton_2_grid(id, lid); AD_ASSERT( lid[0]>=0 || lid[1]>=0 || lid[2]>=0 , "ERROR: AdDarkSkyDataReader::load_data_(): invalid dark sky id" ); AD_ASSERT( lid[0]<200000 || lid[1]<200000 || lid[2]<200000 , "ERROR: AdDarkSkyDataReader::load_data_(): invalid dark sky id" ); const bool skip_particle = ( stride!=1 && (lid[0]%stride!=0 || lid[1]%stride!=0 || lid[2]%stride!=0) ); if ( !skip_particle ) { // compute effective particles ID lid[0] /= stride; lid[1] /= stride; lid[2] /= stride; particle_with_id new_part; //const pos_t pos[3] = { pos[0],pos[1],pos[2]); const ids_t lin_id = lid[0]+(global_particle_dims[0])*(lid[1]+lid[2]*(global_particle_dims[1])); int target_ranks[8]; const int num_results = LagrangianRedistribution::get_target_processes<ColumnMajorOrder>(AvVec3i(lid[0],lid[1],lid[2]), procs_dims, global_particle_dims , AvVec3i(cells_per_proc), target_ranks ); // to-do: get rid of critial section using private data and reduction .... 
#pragma omp critical { for ( int n=0; n<num_results; ++n ) { #ifndef NDEBUG { const AvVec3i idx_3D = ColumnMajorOrder::map_linear_to_3D_idx( lin_id, global_particle_dims ); assert( idx_3D==AvVec3i(lid[0],lid[1],lid[2]) ); AvVec3i tmp_block_offset; AvVec3i tmp_block_dims; // check if point is really assigned to the correct process AdaptiveMassDeposit::LagrangianRedistribution::get_patch_info<ColumnMajorOrder>(num_procs, target_ranks[n], procs_dims, global_particle_dims, tmp_block_offset, tmp_block_dims ); AD_ASSERT_C( lid[0]>=tmp_block_offset[0] && lid[0]<(tmp_block_offset[0]+tmp_block_dims[0]), { AvVec3i(lid[0],lid[1],lid[2]).print(); tmp_block_offset.print(); tmp_block_dims.print();} ); assert( lid[1]>=tmp_block_offset[1] && lid[1]<(tmp_block_offset[1]+tmp_block_dims[1]) ); assert( lid[2]>=tmp_block_offset[2] && lid[2]<(tmp_block_offset[2]+tmp_block_dims[2]) ); } #endif particles_per_rank[target_ranks[n]]->push_back( pos[0],pos[1],pos[2], lin_id ); } ++num_effective_particles; } } // end if (!skip_particle) // save using reduction pragma ++loc_c; ++c; } // end for i<num_read_particles AD_VERBOSE( 1, {std::cout << "INFO: AdDarkSkyDataReader::load_data_(): (MPI_rank==" << my_rank << "): Read " << 100.*loc_c/float(particles_per_proc) << "% of its particles." << std::endl;} ); } // end while CHECK_MPI_ERROR( MPI_File_close( &file_handle ) ); AD_VERBOSE( 1, { std::cout << "INFO: AdDarkSkyDataReader::load_data_(): (MPI_rank==" << my_rank << ") Finished reading " << loc_c << " particles." 
<< std::endl; } ); { long long int global_num_particles = 0; long long int local_num_particles = num_effective_particles; CHECK_MPI_ERROR( MPI_Allreduce(&local_num_particles, &global_num_particles, 1, MPI_LONG_LONG, MPI_SUM, MPI_COMM_WORLD) ); if( global_num_particles != size_t(global_particle_dims[0])*size_t(global_particle_dims[1])*size_t(global_particle_dims[2]) ) { std::cout << global_num_particles << std::endl; std::cout << size_t(global_particle_dims[0])*size_t(global_particle_dims[1])*size_t(global_particle_dims[2]) << std::endl; global_particle_dims.print(); throw AdRuntimeException("ERROR: AdDarkSkyDataReader::load_data_(): inconsistent global particle number"); } } return num_effective_particles; #endif } }; class AdSortedByIdsPointDataReader : public AdPointDataReader { protected: virtual size_t load_data_(const std::string& filename, const int my_rank, const int num_procs, const int stride, AvVec3i& procs_dims, AvVec3i& global_particle_dims, std::vector< std::shared_ptr<particles_with_ids_chunk> >& particles_per_proc) { assert( particles_per_proc.size() == num_procs ); if ( stride>1 ) { std::cout << "WARNING: AdSortedByIdsPointDataReader::load__data(): 'stride' parameter currently not supported - will ignore it ... 
" << std::endl; } MPI_File file_handle = 0; if ( MPI_File_open( MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handle )!=MPI_SUCCESS ) { throw AdRuntimeException("ERROR: AdInitialization_IDS::load_particle_data(): failed to open file: " + filename ); } MPI_Offset file_size = 0; CHECK_MPI_ERROR( MPI_File_get_size( file_handle, &file_size ) ); static const size_t bytes_per_particle = 3*sizeof(float); if ( file_size%bytes_per_particle!=0 ) { throw AdRuntimeException("ERROR: AdInitialization_IDS::load_particle_data(): inconsistent file size "); } const uint64_t global_num_particles = file_size/bytes_per_particle; global_particle_dims = AvVec3i( rint(cbrt(global_num_particles)) ); if ( size_t(global_particle_dims[0])*size_t(global_particle_dims[1])*size_t(global_particle_dims[2])!= global_num_particles ) { throw AdRuntimeException("ERROR: AdInitialization_IDS::load_particle_data(): lagrangian grid not cubical "); } { const unsigned int src_dims[3] = { static_cast<unsigned int>(global_particle_dims[0]), static_cast<unsigned int>(global_particle_dims[1]), static_cast<unsigned int>(global_particle_dims[2]) }; unsigned int dst_dims[3]; ProcessLayout::find_best_match( num_procs, src_dims, dst_dims); procs_dims = AvVec3i( dst_dims[0], dst_dims[1], dst_dims[2] ); } AvVec3i block_offset; AvVec3i particle_block_dims; LagrangianRedistribution::get_patch_info< ColumnMajorOrder >( num_procs, my_rank, procs_dims, global_particle_dims, block_offset, particle_block_dims ); CHECK_MPI_ERROR( MPI_File_set_view( file_handle, 0, MPI_FLOAT, MPI_FLOAT, "native", MPI_INFO_NULL ) ); std::vector<float> buffer( 3*particle_block_dims[0] ); size_t c = 0; for ( size_t k=0; k<particle_block_dims[2]; ++k ) { for ( size_t j=0; j<particle_block_dims[1]; ++j ) { const size_t linear_offset = block_offset[0] + global_particle_dims[0]*( (block_offset[1]+j) + global_particle_dims[1]*(block_offset[2]+k) ) ; // are we still inside the grid of active particles ? 
AD_ASSERT( linear_offset<global_num_particles, "" ); AD_ASSERT_C( (linear_offset+particle_block_dims[0])<=global_num_particles, { printf("ERROR: %lu, %i, %llu\n",linear_offset,particle_block_dims[0], (long long unsigned int)global_num_particles);} ); AD_ASSERT( bytes_per_particle*(linear_offset+particle_block_dims[0])<=size_t(file_size), "bug"); MPI_Status status; CHECK_MPI_ERROR( MPI_File_read_at( file_handle, 3*linear_offset, &buffer[0], buffer.size(), MPI_FLOAT, &status ) ); { int count = 0; CHECK_MPI_ERROR( MPI_Get_count( &status, MPI_FLOAT, &count )); AD_ASSERT_C( size_t(count)==buffer.size() && size_t(count)==3*particle_block_dims[0], { printf("ERROR: file I/O: invalid number of read items(%i) vs particle_lock_dims[0](%i). index=[%lu,%lu], offset==%lu\n",count,particle_block_dims[0],j,k, linear_offset);} ); } for ( size_t i=0; i<particle_block_dims[0]; ++i ) { const size_t idx = 3*i; AD_ASSERT( (idx+2) < buffer.size(), "bug" ); const pos_t pos[3] = { buffer[idx], buffer[idx+1], buffer[idx+2] }; const ids_t id = linear_offset+i; assert( (block_offset[0]+i) < global_particle_dims[0] ); assert( (block_offset[1]+j) < global_particle_dims[1] ); assert( (block_offset[2]+k) < global_particle_dims[2] ); // per construction each particle read by this rank is for this rank ... particles_per_proc[my_rank]->push_back( pos[0],pos[1],pos[2],id ); ++c; } } } AD_ASSERT_C( particles_per_proc[my_rank]->get_num() == (particle_block_dims[0]*particle_block_dims[1]*particle_block_dims[2]), { printf("%lu, %i, %i, %i\n", particles_per_proc[my_rank]->get_num(), particle_block_dims[0], particle_block_dims[1], particle_block_dims[2]);}); AD_VERBOSE(0, { std::cout << "INFO: AdInitialization_IDS::load_particle_data(): Process " << my_rank << " read " << c << " particles. Closing file." 
<< std::endl; } ); CHECK_MPI_ERROR( MPI_File_close( &file_handle ) ); return c; } }; static std::shared_ptr<AdPointDataReader> get_reader( const std::string& format ) { if ( format.find("SORTED_BY_ID")!=std::string::npos ) { return std::shared_ptr<AdPointDataReader>( new AdSortedByIdsPointDataReader() ); } else if ( format.find("DARK_SKY")!=std::string::npos ) { return std::shared_ptr<AdPointDataReader>( new AdDarkSkyDataReader() ); } else { throw AdRuntimeException("ERROR: id_type " + format + " not supported."); } } } #endif
Login.h
struct Credential{ char username[SIZE]; char password[SIZE]; }; // Muhammad Haekal Al Ghifary <2006577605> // hide cursor agar blinking tidak keliatan void HideCursor(){ CONSOLE_CURSOR_INFO cursor; cursor.bVisible = FALSE; cursor.dwSize = sizeof(cursor); HANDLE handle = GetStdHandle(STD_OUTPUT_HANDLE); SetConsoleCursorInfo(handle, &cursor); } // Muhammad Haekal Al Ghifary <2006577605> int login_comparison(char user_input[], char login_info[], int *flag, int *iterator){ if (strcmp(user_input, login_info) == 0){ *flag = 1; *iterator = count_lines("credential/login.txt"); } } // Muhammad Haekal Al Ghifary <2006577605> int is_login_valid(struct Credential input_cred){ int i; int FOUND = 0; char filename[] = "credential/login.txt"; FILE *file = fopen(filename, "r"); if (file == NULL){ printf("Gagal untuk membuka %s.\n", filename); exit(0); } char combine_cred[SIZE]; strcpy(combine_cred, input_cred.username); strcat(combine_cred, ":"); strcat(combine_cred, input_cred.password); strcat(combine_cred, "\n"); char file_login[SIZE]; int lines = count_lines(filename); #pragma omp parallel num_threads(4) { // cari login info secara parallel #pragma omp for for(i = 0; i < lines; i++){ fgets(file_login, sizeof(file_login), file); #pragma omp critical login_comparison(combine_cred, file_login, &FOUND, &i); } } fclose(file); return FOUND; } // Muhammad Haekal Al Ghifary <2006577605> void login_prompt(char **user){ int i = 0; char char_pass; struct Credential login_info; while(1){ printf("Username:\n"); printf("Password: "); strcpy(login_info.username, ""); SetCursorPosition(10, 0); scanf("%s", login_info.username); SetCursorPosition(10, 1); // hide password with asterisk i = 0; strcpy(login_info.password, ""); // flush the last password while(1){ char_pass = getch(); if (char_pass == 13) break; login_info.password[i] = char_pass; printf("*"); i++; } system("cls"); if (is_login_valid(login_info)){ HideCursor(); // pisah @gmail.com dan pass ke parameter user char *token; *user = 
malloc(SIZE * sizeof(char)); token = strtok(login_info.username, "@"); strcpy(*user, token); break; } else{ SetCursorPosition(0, 2); printf("\nUsername dan password tidak ditemukan!"); SetCursorPosition(0, 0); } } }
quantize.h
// Copyright 2018 The MACE Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef MACE_UTILS_QUANTIZE_H_ #define MACE_UTILS_QUANTIZE_H_ #include <algorithm> #include <cmath> #include <limits> #if defined(MACE_ENABLE_NEON) #include <arm_neon.h> #endif // MACE_ENABLE_NEON #include "mace/utils/logging.h" namespace mace { template<typename T> inline void AdjustRange(const float in_min_data, const float in_max_data, const bool non_zero, float *scale, int32_t *zero_point) { // re-range to make range include zero float and // make zero float as integer u8 const T quantized_min = std::numeric_limits<T>::lowest(); const T quantized_max = std::numeric_limits<T>::max(); if (quantized_min < 0) { MACE_ASSERT(!non_zero, "Cannot nudge to non_zero quantize value."); } float out_max = std::max(0.f, in_max_data); float out_min = std::min(0.f, in_min_data); // make in_min_data quantize as greater than 1 if (non_zero) { out_min = std::min(out_min, in_min_data - (out_max - in_min_data) / (quantized_max - quantized_min - 1)); } *scale = (out_max - out_min) / (quantized_max - quantized_min); const float kEps = 1e-6; if (out_min < -kEps && out_max > kEps) { float quantized_zero = -out_min / *scale; int32_t quantized_zero_near_int = static_cast<int32_t>(roundf(quantized_zero)); *zero_point = quantized_zero_near_int; if (fabs(quantized_zero - quantized_zero_near_int) > kEps) { if (quantized_zero < quantized_zero_near_int || non_zero) { // keep out_max fixed, and 
move out_min *zero_point = static_cast<int32_t>(std::ceil(quantized_zero)); *scale = out_max / (quantized_max - *zero_point); } else { // keep out_min fixed, and move out_max *scale = out_min / (quantized_min - *zero_point); } } } else if (out_min > -kEps) { *zero_point = quantized_min; } else { *zero_point = quantized_max; } } template<typename T> inline T Saturate(float value) { int rounded_value = static_cast<int>(value); if (rounded_value <= std::numeric_limits<T>::lowest()) { return std::numeric_limits<T>::lowest(); } else if (rounded_value >= std::numeric_limits<T>::max()) { return std::numeric_limits<T>::max(); } else { return static_cast<T>(rounded_value); } } inline void FindMinMax(const float *input, const index_t size, float *min_val, float *max_val) { float max_v = std::numeric_limits<float>::lowest(); float min_v = std::numeric_limits<float>::max(); for (index_t i = 0; i < size; ++i) { max_v = std::max(max_v, input[i]); min_v = std::min(min_v, input[i]); } *min_val = min_v; *max_val = max_v; } template<typename T> inline void QuantizeWithScaleAndZeropoint(const float *input, const index_t size, float scale, int32_t zero_point, T *output) { float recip_scale = 1 / scale; #pragma omp parallel for schedule(runtime) for (int i = 0; i < size; ++i) { output[i] = Saturate<T>(roundf(zero_point + recip_scale * input[i])); } } template<typename T> inline void Quantize(const float *input, const index_t size, bool non_zero, T *output, float *scale, int32_t *zero_point) { float in_min_data; float in_max_data; FindMinMax(input, size, &in_min_data, &in_max_data); AdjustRange<T>(in_min_data, in_max_data, non_zero, scale, zero_point); QuantizeWithScaleAndZeropoint(input, size, *scale, *zero_point, output); } template<typename T> inline void Quantize(const Tensor &input, Tensor *output, float *min_out, float *max_out) { MACE_CHECK(input.size() != 0); Tensor::MappingGuard input_guard(&input); Tensor::MappingGuard output_guard(output); auto *input_data = 
input.data<float>(); auto *output_data = output->mutable_data<T>(); float scale; int32_t zero_point; Quantize(input_data, input.size(), false, output_data, &scale, &zero_point); *min_out = scale * (std::numeric_limits<T>::lowest() - zero_point); *max_out = scale * (std::numeric_limits<T>::max() - zero_point); } template<typename T> inline void Dequantize(const T *input, const index_t size, const float scale, const int32_t zero_point, float *output) { #pragma omp parallel for schedule(runtime) for (int i = 0; i < size; ++i) { output[i] = scale * (input[i] - zero_point); } } #if defined(MACE_ENABLE_NEON) template<> inline void QuantizeWithScaleAndZeropoint<uint8_t>(const float *input, const index_t size, float scale, int32_t zero_point, uint8_t *output) { const float32x4_t vround = vdupq_n_f32(0.5); const float32x4_t vzero = vaddq_f32(vround, vcvtq_f32_s32(vdupq_n_s32(zero_point))); const float recip_scale = 1.f / scale; const float32x4_t vrecip_scale = vdupq_n_f32(recip_scale); const index_t block_count = size / 16; #pragma omp parallel for schedule(runtime) for (index_t i = 0; i < block_count; ++i) { float32x4_t vi0 = vld1q_f32(input + i * 16); float32x4_t vi1 = vld1q_f32(input + i * 16 + 4); float32x4_t vi2 = vld1q_f32(input + i * 16 + 8); float32x4_t vi3 = vld1q_f32(input + i * 16 + 12); int32x4_t vo0_s32 = vcvtq_s32_f32(vmlaq_f32(vzero, vi0, vrecip_scale)); int32x4_t vo1_s32 = vcvtq_s32_f32(vmlaq_f32(vzero, vi1, vrecip_scale)); int32x4_t vo2_s32 = vcvtq_s32_f32(vmlaq_f32(vzero, vi2, vrecip_scale)); int32x4_t vo3_s32 = vcvtq_s32_f32(vmlaq_f32(vzero, vi3, vrecip_scale)); uint8x8_t vo0_u8 = vqmovun_s16(vcombine_s16(vqmovn_s32(vo0_s32), vqmovn_s32(vo1_s32))); uint8x8_t vo1_u8 = vqmovun_s16(vcombine_s16(vqmovn_s32(vo2_s32), vqmovn_s32(vo3_s32))); uint8x16_t vo = vcombine_u8(vo0_u8, vo1_u8); vst1q_u8(output + i * 16, vo); } #pragma omp parallel for schedule(runtime) for (index_t i = block_count * 16; i < size; ++i) { output[i] = Saturate<uint8_t>(roundf(zero_point + 
recip_scale * input[i])); } } template<> inline void Dequantize<int32_t>(const int32_t *input, const index_t size, const float scale, const int32_t zero_point, float *output) { const index_t block_count = size / 4; const int32x4_t vzero = vdupq_n_s32(zero_point); const float32x4_t vscale = vdupq_n_f32(scale); #pragma omp parallel for schedule(runtime) for (index_t i = 0; i < block_count; ++i) { int32x4_t vi = vld1q_s32(input + i * 4); float32x4_t vo = vmulq_f32(vscale, vcvtq_f32_s32(vsubq_s32(vi, vzero))); vst1q_f32(output + i * 4, vo); } for (index_t i = block_count * 4; i < size; ++i) { output[i] = scale * (input[i] - zero_point); } } template<> inline void Dequantize<uint8_t>(const uint8_t *input, const index_t size, const float scale, const int32_t zero_point, float *output) { const index_t block_count = size / 16; const int32x4_t vzero = vdupq_n_s32(zero_point); const float32x4_t vscale = vdupq_n_f32(scale); #pragma omp parallel for schedule(runtime) for (index_t i = 0; i < block_count; ++i) { uint8x16_t vi = vld1q_u8(input + i * 16); float32x4x4_t vo = { vmulq_f32(vscale, vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16( vget_low_u16(vmovl_u8(vget_low_u8(vi))))), vzero))), vmulq_f32(vscale, vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16( vget_high_u16(vmovl_u8(vget_low_u8(vi))))), vzero))), vmulq_f32(vscale, vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16( vget_low_u16(vmovl_u8(vget_high_u8(vi))))), vzero))), vmulq_f32(vscale, vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16( vget_high_u16(vmovl_u8(vget_high_u8(vi))))), vzero))), }; vst1q_f32(output + i * 16, vo.val[0]); vst1q_f32(output + i * 16 + 4, vo.val[1]); vst1q_f32(output + i * 16 + 8, vo.val[2]); vst1q_f32(output + i * 16 + 12, vo.val[3]); } for (index_t i = block_count * 16; i < size; ++i) { output[i] = scale * (input[i] - zero_point); } } #endif // MACE_ENABLE_NEON template<typename T> inline void DeQuantize(const Tensor &input, const float min_in, const float max_in, 
Tensor *output) { MACE_CHECK(input.size() != 0); Tensor::MappingGuard input_guard(&input); Tensor::MappingGuard output_guard(output); auto *input_data = input.data<T>(); auto *output_data = output->mutable_data<float>(); float scale; int32_t zero_point; AdjustRange<T>(min_in, max_in, false, &scale, &zero_point); Dequantize(input_data, input.size(), scale, zero_point, output_data); } inline void QuantizeMultiplier(double multiplier, int32_t *output_multiplier, int32_t *shift) { const double q = std::frexp(multiplier, shift); auto qint = static_cast<int64_t>(roundl(q * (1ll << 31))); if (qint == (1ll << 31)) { qint /= 2; ++*shift; } *output_multiplier = static_cast<int32_t>(qint); MACE_CHECK(*output_multiplier <= std::numeric_limits<int32_t>::max()); } inline void GetOutputMultiplierAndShift( const float lhs_scale, const float rhs_scale, const float output_scale, int32_t *quantized_multiplier, int *right_shift) { float real_multiplier = lhs_scale * rhs_scale / output_scale; MACE_CHECK(real_multiplier > 0.f && real_multiplier < 1.f, real_multiplier); int exponent; QuantizeMultiplier(real_multiplier, quantized_multiplier, &exponent); *right_shift = -exponent; MACE_CHECK(*right_shift >= 0); } } // namespace mace #endif // MACE_UTILS_QUANTIZE_H_
GB_binop__div_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__div_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__div_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__div_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__div_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__div_uint8) // A*D function (colscale): GB (_AxD__div_uint8) // D*A function (rowscale): GB (_DxB__div_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__div_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__div_uint8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_uint8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_uint8) // C=scalar+B GB (_bind1st__div_uint8) // C=scalar+B' GB (_bind1st_tran__div_uint8) // C=A+scalar GB (_bind2nd__div_uint8) // C=A'+scalar GB (_bind2nd_tran__div_uint8) // C type: uint8_t // A type: uint8_t // A pattern? 0 // B type: uint8_t // B pattern? 
0 // BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 8) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_UNSIGNED (x, y, 8) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_DIV || GxB_NO_UINT8 || GxB_NO_DIV_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__div_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__div_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__div_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__div_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__div_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else 
uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__div_uint8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__div_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint8_t alpha_scalar ; uint8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ; beta_scalar = (*((uint8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__div_uint8) ( GrB_Matrix C, const int C_sparsity, 
const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__div_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
//------------------------------------------------------------------------------
// Generated factory kernels for the binary operator z = x / y (GB_IDIV_UNSIGNED,
// 8-bit) on uint8_t matrices.  All kernel bodies come from #include'd template
// files; the GB_* macros select the operator/type.  Code is unchanged below.
// NOTE(review): this span opens inside a kernel whose signature precedes this
// chunk; the lines up to the first full function header are its tail.
//------------------------------------------------------------------------------

#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__div_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,             // if true, use M structurally (values ignored)
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,        // task slicing of the entries of M
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;             // this kernel was compiled out
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__div_uint8)
(
    GrB_Matrix C,
    const int ewise_method,             // selects which bitmap sub-method to use
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,               // if true, use the complemented mask !M
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__div_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,     // scalar x, bound as the first operand
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (GBB treats NULL as "all present")
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        // GB_IDIV_UNSIGNED: unsigned integer divide with divide-by-zero handled
        Cx [p] = GB_IDIV_UNSIGNED (x, bij, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__div_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,     // scalar y, bound as the second operand
    const int8_t *restrict Ab,  // bitmap of A (GBB treats NULL as "all present")
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IDIV_UNSIGNED (aij, y, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 8) ; \
}

GrB_Info GB (_bind1st_tran__div_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-thread transpose workspaces
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the kernels that follow
    // (a no-op here, since both operand types are uint8_t)
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 8) ; \
}

GrB_Info GB (_bind2nd_tran__div_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif  // NOTE(review): matching conditional (#ifndef GBCOMPACT or similar)
        // opens before this chunk — confirm against the full file.
Types.h
//===---------- Types.h - OpenMP types ---------------------------- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#ifndef OMPTARGET_TYPES_H
#define OMPTARGET_TYPES_H

// Tell the compiler that we do not have any "call-like" inline assembly in the
// device runtime. That means we cannot have inline assembly which will call
// another function but only inline assembly that performs some operation or
// side-effect and then continues execution with something on the existing call
// stack.
//
// TODO: Find a good place for this
#pragma omp assumes ext_no_call_asm

/// Base type declarations for freestanding mode
///
///{
using int8_t = char;
using uint8_t = unsigned char;
using int16_t = short;
using uint16_t = unsigned short;
using int32_t = int;
using uint32_t = unsigned int;
using int64_t = long;
using uint64_t = unsigned long;

// Freestanding mode has no <cstdint>; verify the hand-rolled aliases above
// match the target ABI at compile time.
static_assert(sizeof(int8_t) == 1, "type size mismatch");
static_assert(sizeof(uint8_t) == 1, "type size mismatch");
static_assert(sizeof(int16_t) == 2, "type size mismatch");
static_assert(sizeof(uint16_t) == 2, "type size mismatch");
static_assert(sizeof(int32_t) == 4, "type size mismatch");
static_assert(sizeof(uint32_t) == 4, "type size mismatch");
static_assert(sizeof(int64_t) == 8, "type size mismatch");
static_assert(sizeof(uint64_t) == 8, "type size mismatch");
///}

// Mirrors the host-side omp_proc_bind_t values.
enum omp_proc_bind_t {
  omp_proc_bind_false = 0,
  omp_proc_bind_true = 1,
  omp_proc_bind_master = 2,
  omp_proc_bind_close = 3,
  omp_proc_bind_spread = 4
};

// Mirrors the host-side omp_sched_t values.
enum omp_sched_t {
  omp_sched_static = 1,  /* chunkSize >0 */
  omp_sched_dynamic = 2, /* chunkSize >0 */
  omp_sched_guided = 3,  /* chunkSize >0 */
  omp_sched_auto = 4,    /* no chunkSize */
};

// Internal schedule kinds as encoded by the compiler/runtime interface; the
// values must stay in sync with kmp.h on the host.
enum kmp_sched_t {
  kmp_sched_static_chunk = 33,
  kmp_sched_static_nochunk = 34,
  kmp_sched_dynamic = 35,
  kmp_sched_guided = 36,
  kmp_sched_runtime = 37,
  kmp_sched_auto = 38,

  kmp_sched_static_balanced_chunk = 45,

  kmp_sched_static_ordered = 65,
  kmp_sched_static_nochunk_ordered = 66,
  kmp_sched_dynamic_ordered = 67,
  kmp_sched_guided_ordered = 68,
  kmp_sched_runtime_ordered = 69,
  kmp_sched_auto_ordered = 70,

  kmp_sched_distr_static_chunk = 91,
  kmp_sched_distr_static_nochunk = 92,
  kmp_sched_distr_static_chunk_sched_static_chunkone = 93,

  kmp_sched_default = kmp_sched_static_nochunk,
  kmp_sched_unordered_first = kmp_sched_static_chunk,
  kmp_sched_unordered_last = kmp_sched_auto,
  kmp_sched_ordered_first = kmp_sched_static_ordered,
  kmp_sched_ordered_last = kmp_sched_auto_ordered,
  kmp_sched_distribute_first = kmp_sched_distr_static_chunk,
  kmp_sched_distribute_last =
      kmp_sched_distr_static_chunk_sched_static_chunkone,

  /* Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
   * Since we need to distinguish the three possible cases (no modifier,
   * monotonic modifier, nonmonotonic modifier), we need separate bits for
   * each modifier. The absence of monotonic does not imply nonmonotonic,
   * especially since 4.5 says that the behaviour of the "no modifier" case
   * is implementation defined in 4.5, but will become "nonmonotonic" in 5.0.
   *
   * Since we're passing a full 32 bit value, we can use a couple of high
   * bits for these flags; out of paranoia we avoid the sign bit.
   *
   * These modifiers can be or-ed into non-static schedules by the compiler
   * to pass the additional information. They will be stripped early in the
   * processing in __kmp_dispatch_init when setting up schedules, so
   * most of the code won't ever see schedules with these bits set.
   */
  kmp_sched_modifier_monotonic = (1 << 29),
  /**< Set if the monotonic schedule modifier was present */
  kmp_sched_modifier_nonmonotonic = (1 << 30),
/**< Set if the nonmonotonic schedule modifier was present */

// Helper macros to strip/test the modifier bits; preprocessor position inside
// the enum is irrelevant, they are kept here to stay next to the bit values.
#define SCHEDULE_WITHOUT_MODIFIERS(s)                                          \
  (enum kmp_sched_t)(                                                          \
      (s) & ~(kmp_sched_modifier_nonmonotonic | kmp_sched_modifier_monotonic))
#define SCHEDULE_HAS_MONOTONIC(s) (((s)&kmp_sched_modifier_monotonic) != 0)
#define SCHEDULE_HAS_NONMONOTONIC(s)                                           \
  (((s)&kmp_sched_modifier_nonmonotonic) != 0)
#define SCHEDULE_HAS_NO_MODIFIERS(s)                                           \
  (((s) & (kmp_sched_modifier_nonmonotonic | kmp_sched_modifier_monotonic)) == \
   0)

};

struct TaskDescriptorTy;
using TaskFnTy = int32_t (*)(int32_t global_tid, TaskDescriptorTy *taskDescr);
struct TaskDescriptorTy {
  void *Payload;
  TaskFnTy TaskFn;
};

// NOTE(review): both variant blocks define LaneMaskTy as uint64_t — the first
// matches amdgcn, the second (match_none) everything else, so exactly one is
// active per target. Confirm whether non-amdgcn targets should use a narrower
// mask (e.g. 32-bit for 32-lane warps).
#pragma omp begin declare variant match(device = {arch(amdgcn)})
using LaneMaskTy = uint64_t;
#pragma omp end declare variant

#pragma omp begin declare variant match(                                       \
    device = {arch(amdgcn)}, implementation = {extension(match_none)})
using LaneMaskTy = uint64_t;
#pragma omp end declare variant

namespace lanes {
enum : LaneMaskTy { All = ~(LaneMaskTy)0 };
} // namespace lanes

/// The ident structure that describes a source location. The struct is
/// identical to the one in the kmp.h file. We maintain the same data structure
/// for compatibility.
struct IdentTy {
  int32_t reserved_1;  /**<  might be used in Fortran; see above  */
  int32_t flags;       /**<  also f.flags; KMP_IDENT_xxx flags; KMP_IDENT_KMPC
                            identifies this union member  */
  int32_t reserved_2;  /**<  not really used in Fortran any more; see above */
  int32_t reserved_3;  /**<  source[4] in Fortran, do not use for C++  */
  char const *psource; /**< String describing the source location.
                       The string is composed of semi-colon separated fields
                       which describe the source file, the function and a pair
                       of line numbers that delimit the construct. */
};

using __kmpc_impl_lanemask_t = LaneMaskTy;

using ParallelRegionFnTy = void *;

using CriticalNameTy = int32_t[8];

struct omp_lock_t {
  void *Lock;
};

using InterWarpCopyFnTy = void (*)(void *src, int32_t warp_num);
using ShuffleReductFnTy = void (*)(void *rhsData, int16_t lane_id,
                                   int16_t lane_offset, int16_t shortCircuit);
using ListGlobalFnTy = void (*)(void *buffer, int idx, void *reduce_data);

/// Macros for allocating variables in different address spaces.
///{

// Follows the pattern in interface.h
typedef enum omp_allocator_handle_t {
  omp_null_allocator = 0,
  omp_default_mem_alloc = 1,
  omp_large_cap_mem_alloc = 2,
  omp_const_mem_alloc = 3,
  omp_high_bw_mem_alloc = 4,
  omp_low_lat_mem_alloc = 5,
  omp_cgroup_mem_alloc = 6,
  omp_pteam_mem_alloc = 7,
  omp_thread_mem_alloc = 8,
  KMP_ALLOCATOR_MAX_HANDLE = ~(0U)
} omp_allocator_handle_t;

// Kernel execution mode flags (generic vs. SPMD).
enum OMPTgtExecModeFlags : int8_t {
  OMP_TGT_EXEC_MODE_GENERIC = 1 << 0,
  OMP_TGT_EXEC_MODE_SPMD = 1 << 1,
};

#define __PRAGMA(STR) _Pragma(#STR)
#define OMP_PRAGMA(STR) __PRAGMA(omp STR)

// Place a variable in team-shared storage, uninitialized.
#define SHARED(NAME)                                                           \
  NAME [[clang::loader_uninitialized]];                                        \
  OMP_PRAGMA(allocate(NAME) allocator(omp_pteam_mem_alloc))

// TODO: clang should use address space 5 for omp_thread_mem_alloc, but right
//       now that's not the case.
#define THREAD_LOCAL(NAME)                                                     \
  NAME [[clang::loader_uninitialized, clang::address_space(5)]]

// TODO: clang should use address space 4 for omp_const_mem_alloc, maybe it
//       does?
#define CONSTANT(NAME)                                                         \
  NAME [[clang::loader_uninitialized, clang::address_space(4)]]

///}

#endif
stream.c
#include <stdio.h> #include <stdlib.h> #include <string.h> int main(int argc, char* argv[]) { const int LENGTH = 2000; printf("Allocating arrays of size %d elements.\n", LENGTH); #pragma 0 double* a = (double*) malloc(sizeof(double) * LENGTH); #pragma 0 double* b = (double*) malloc(sizeof(double) * LENGTH); #pragma 0 double* fast_c = (double*) malloc(sizeof(double) * LENGTH); #pragma default 1 // mlm_set_pool(1); printf("Allocation for fast_c is %llu\n", (unsigned long long int) fast_c); double* c = (double*) malloc(sizeof(double) * LENGTH); printf("Done allocating arrays.\n"); int i; for(i = 0; i < LENGTH; ++i) { a[i] = i; b[i] = LENGTH - i; c[i] = 0; } // Issue a memory copy memcpy(fast_c, c, sizeof(double) * LENGTH); printf("Perfoming the fast_c compute loop...\n"); #pragma omp parallel for for(i = 0; i < LENGTH; ++i) { fast_c[i] = 2.0 * a[i] + 1.5 * b[i]; } // Now copy results back memcpy(c, fast_c, sizeof(double) * LENGTH); double sum = 0; for(i = 0; i < LENGTH; ++i) { sum += c[i]; } printf("Sum of arrays is: %f\n", sum); printf("Freeing arrays...\n"); free(a); free(b); free(c); free(fast_c); printf("Done.\n"); }
data_4D.c
/*
This file is part of the MCsquare software
Copyright © 2016-2017 Université catholique de Louvain (UCL)
All rights reserved.

The MCsquare software has been developed by Kevin Souris from UCL in the context of a collaboration with IBA s.a.
Each use of this software must be attributed to Université catholique de Louvain (UCL, Louvain-la-Neuve).
Any other additional authorizations may be asked to LTTO@uclouvain.be.
The MCsquare software is released under the terms of the open-source Apache 2.0 license.
Anyone can use or modify the code provided that the Apache 2.0 license conditions are met.
See the Apache 2.0 license for more details https://www.apache.org/licenses/LICENSE-2.0
The MCsquare software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/

#include "include/data_4D.h"

// Imports all 4D-CT phases from ./4DCT/CT_<k>.mhd (k = 1..N, discovered by
// probing files until one is missing), plus the HU->density and HU->material
// conversion tables. Phases 1..N-1 share (borrow) the conversion-table
// pointers owned by CT[0]. Returns the phase array, or NULL on any failure
// (after freeing everything already allocated via Free_4DCT).
DATA_CT **Import_4DCT(DATA_config *config){

  char phase_file_path[200];
  config->Num_4DCT_phases = 0;
  int i;
  DATA_CT **CT = NULL;

  // count number of phases.
  // (Existence probe: open CT_k.mhd for increasing k until fopen fails.)
  sprintf(phase_file_path, "./4DCT/CT_1.mhd");
  FILE *file_mhd = fopen(phase_file_path,"r");
  while(file_mhd != NULL){
    config->Num_4DCT_phases += 1;
    fclose(file_mhd);
    sprintf(phase_file_path, "./4DCT/CT_%d.mhd", config->Num_4DCT_phases+1);
    file_mhd = fopen(phase_file_path,"r");
  }

  if(config->Num_4DCT_phases == 0){
    printf("\n Error: 4D-CT phases not found in directory \"./4DCT\"\n\n");
    return NULL;
  }

  printf("\n4D data: %d phases found in directory \"./4DCT\"\n", config->Num_4DCT_phases);

  CT = (DATA_CT**)malloc(config->Num_4DCT_phases * sizeof(DATA_CT*));
  for(i=0; i < config->Num_4DCT_phases; i++) CT[i] = NULL;

  // import HU conversion data
  // CT[0] owns the conversion tables; pointers are NULLed first so a partial
  // read failure can be cleaned up safely by Free_4DCT.
  CT[0] = (DATA_CT*) malloc(sizeof(DATA_CT));
  CT[0]->Conversion_HU_Density = NULL;
  CT[0]->Conversion_Densities = NULL;
  CT[0]->Conversion_HU_Material = NULL;
  CT[0]->Conversion_Density_Material = NULL;
  CT[0]->Conversion_Material_labels = NULL;

  if(Read_Density_conversion_data(config->HU_Density_File, CT[0]) != 0){
    Free_4DCT(CT, config->Num_4DCT_phases);
    return NULL;
  }
  // Display_Density_conversion_data(CT);

  if(Read_Material_conversion_data(config->HU_Material_File, CT[0]) != 0){
    Free_4DCT(CT, config->Num_4DCT_phases);
    return NULL;
  }
  // Display_Material_conversion_data(CT);

  // import all phases
  int GridSize[3];
  VAR_DATA VoxelLength[3], Origin[3];
  int phaseID = 0;
  VAR_DATA *hu;

  for(phaseID=0; phaseID<config->Num_4DCT_phases; phaseID++){
    printf("  Loading phase %d\n", phaseID+1);

    if(phaseID != 0){
      // Phases > 0 borrow the conversion tables owned by CT[0]
      // (shallow pointer copies, freed only once via CT[0]).
      CT[phaseID] = (DATA_CT*) malloc(sizeof(DATA_CT));
      CT[phaseID]->Conversion_HU_Density = CT[0]->Conversion_HU_Density;
      CT[phaseID]->Conversion_Densities = CT[0]->Conversion_Densities;
      CT[phaseID]->Conversion_HU_Material = CT[0]->Conversion_HU_Material;
      CT[phaseID]->Conversion_Density_Material = CT[0]->Conversion_Density_Material;
      CT[phaseID]->Conversion_Material_labels = CT[0]->Conversion_Material_labels;
      CT[phaseID]->Num_Density_Data = CT[0]->Num_Density_Data;
      CT[phaseID]->Num_Materials_Data = CT[0]->Num_Materials_Data;
    }

    sprintf(phase_file_path, "./4DCT/CT_%d.mhd", phaseID+1);
    hu = import_MHD_image(phase_file_path, GridSize, VoxelLength, Origin);
    if(hu == NULL){
      Free_4DCT(CT, config->Num_4DCT_phases);
      return NULL;
    }

    CT[phaseID]->GridSize[0] = GridSize[0];
    CT[phaseID]->GridSize[1] = GridSize[1];
    CT[phaseID]->GridSize[2] = GridSize[2];
    CT[phaseID]->Nbr_voxels = GridSize[0]*GridSize[1]*GridSize[2];
    CT[phaseID]->Length[0] = VoxelLength[0]*GridSize[0];
    CT[phaseID]->Length[1] = VoxelLength[1]*GridSize[1];
    CT[phaseID]->Length[2] = VoxelLength[2]*GridSize[2];
    CT[phaseID]->VoxelLength[0] = VoxelLength[0];
    CT[phaseID]->VoxelLength[1] = VoxelLength[1];
    CT[phaseID]->VoxelLength[2] = VoxelLength[2];
    CT[phaseID]->Origin[0] = Origin[0];
    CT[phaseID]->Origin[1] = Origin[1];
    CT[phaseID]->Origin[2] = Origin[2];

    CT[phaseID]->density = (VAR_DATA*)malloc(CT[phaseID]->Nbr_voxels * sizeof(VAR_DATA));
    CT[phaseID]->material = (unsigned short int*)malloc(CT[phaseID]->Nbr_voxels * sizeof(unsigned short int));

    // Voxel-wise HU -> density -> material conversion, parallel over voxels.
    #pragma omp parallel for private(i)
    for(i=0; i<CT[phaseID]->Nbr_voxels; i++){
      CT[phaseID]->density[i] = (VAR_DATA)HU_to_Density_convertion(hu[i], CT[phaseID]);
      CT[phaseID]->material[i] = (unsigned short int)Density_to_Material_convertion(CT[phaseID]->density[i], CT[phaseID]);
    }

    free(hu);
  }

  // Verify that all CT images have the same properties
  // (GridSize/VoxelLength/Origin still hold the values of the LAST imported
  // phase, so each phase is compared against that reference.)
  for(phaseID=0; phaseID<config->Num_4DCT_phases; phaseID++){
    if(CT[phaseID]->GridSize[0] != GridSize[0] || CT[phaseID]->GridSize[1] != GridSize[1] || CT[phaseID]->GridSize[2] != GridSize[2]){
      printf("\n Error: all phases of the imported 4DCT doesn't have the same grid size\n\n");
      Free_4DCT(CT, config->Num_4DCT_phases);
      return NULL;
    }
    if(CT[phaseID]->VoxelLength[0] != VoxelLength[0] || CT[phaseID]->VoxelLength[1] != VoxelLength[1] || CT[phaseID]->VoxelLength[2] != VoxelLength[2]){
      printf("\n Error: all phases of the imported 4DCT doesn't have the same voxel size\n\n");
      Free_4DCT(CT, config->Num_4DCT_phases);
      return NULL;
    }
    if(CT[phaseID]->Origin[0] != Origin[0] || CT[phaseID]->Origin[1] != Origin[1] || CT[phaseID]->Origin[2] != Origin[2]){
      printf("\n Error: all phases of the imported 4DCT doesn't have the same origin\n\n");
      Free_4DCT(CT, config->Num_4DCT_phases);
      return NULL;
    }
  }

  printf("\n");

  return CT;
}

// Loads a 3-component deformation field from an MHD file and repacks it from
// interleaved (xyz per voxel) into planar layout (all-x, all-y, all-z blocks
// of NumVoxels each). Components 0 and 1 are swapped and each component is
// divided by 10*Spacing — presumably a mm -> voxel-units conversion with an
// axis-order change; TODO confirm against import_MHD_image conventions.
// Returns the planar field (caller frees), or NULL on read failure.
VAR_DATA *Import_Def_Field(char *file_path, int *GridSize, VAR_DATA *Spacing, VAR_DATA *Origin){

  int i, j, k, l, m;  // NOTE(review): i and k are declared but unused below

  VAR_DATA *Field_tmp = import_MHD_image(file_path, GridSize, Spacing, Origin);
  if(Field_tmp == NULL) return NULL;

  int NumVoxels = GridSize[0]*GridSize[1]*GridSize[2];
  VAR_DATA *Field = (VAR_DATA*)malloc(3*NumVoxels * sizeof(VAR_DATA));

  m = 0;
  for(j=0; j<GridSize[2]; j++){
    for(k=0; k<GridSize[1]; k++){
      for(l=0; l<GridSize[0]; l++){
        Field[m] = Field_tmp[3*m+1]/(10*Spacing[1]);
        Field[m+NumVoxels] = Field_tmp[3*m+0]/(10*Spacing[0]);
        Field[m+2*NumVoxels] = Field_tmp[3*m+2]/(10*Spacing[2]);
        m += 1;
  }}}

  free(Field_tmp);

  return Field;
}

// Imports the 4D deformation fields from ./Fields. Phase count is discovered
// by probing Field_Ref_to_phase<k>.mhd files. Depending on config->Field_type,
// fields are either velocity fields (type 1: only Ref->phase logs are read and
// both directions are obtained by exponentiation) or displacement fields
// (both directions read from files). Returns NULL on failure.
DATA_4D_Fields *Import_4D_Fields(DATA_config *config){

  int i;
  char file_path[200];
  DATA_4D_Fields *Fields = NULL;
  int Fields_GridSize[4];
  VAR_DATA Fields_Spacing[3], Fields_Origin[3];

  config->Num_4DCT_phases = 0;

  // count number of phases.
  sprintf(file_path, "./Fields/Field_Ref_to_phase1.mhd");
  FILE *file_mhd = fopen(file_path,"r");
  while(file_mhd != NULL){
    config->Num_4DCT_phases += 1;
    fclose(file_mhd);
    sprintf(file_path, "./Fields/Field_Ref_to_phase%d.mhd", config->Num_4DCT_phases+1);
    file_mhd = fopen(file_path,"r");
  }

  if(config->Num_4DCT_phases < 1){
    printf("\n Error: The number of fields to be imported must be >= 1.\n\n");
    return NULL;
  }

  printf("\n4D data: %d phases found in directory \"./Fields\"\n", config->Num_4DCT_phases);

  Fields = (DATA_4D_Fields*) malloc(sizeof(DATA_4D_Fields));
  Fields->Nbr_Fields = config->Num_4DCT_phases;
  Fields->Phase2Ref = (VAR_DATA**)malloc(config->Num_4DCT_phases * sizeof(VAR_DATA*));
  Fields->Ref2Phase = (VAR_DATA**)malloc(config->Num_4DCT_phases * sizeof(VAR_DATA*));
  for(i=0; i < config->Num_4DCT_phases; i++){
    Fields->Phase2Ref[i] = NULL;
    Fields->Ref2Phase[i] = NULL;
  }

  if(config->Field_type == 1){   // Field type == Velocity

    Fields->Ref2Phase_log = (VAR_DATA**)malloc(config->Num_4DCT_phases * sizeof(VAR_DATA*));

    for(i=0; i<config->Num_4DCT_phases; i++){
      printf("  Loading deformation field %d\n", i+1);
      sprintf(file_path, "./Fields/Field_Ref_to_phase%d.mhd", i+1);
      Fields->Ref2Phase_log[i] = Import_Def_Field(file_path, Fields_GridSize, Fields_Spacing, Fields_Origin);
      if(Fields->Ref2Phase_log[i] == NULL){
        Free_4D_Fields(Fields);
        return NULL;
      }
    }

    // GridSize[0] = 3 encodes the vector components; [1..3] are spatial dims.
    Fields->GridSize[0] = 3;
    Fields->GridSize[1] = Fields_GridSize[0];
    Fields->GridSize[2] = Fields_GridSize[1];
    Fields->GridSize[3] = Fields_GridSize[2];
    Fields->Spacing[0] = Fields_Spacing[0];
    Fields->Spacing[1] = Fields_Spacing[1];
    Fields->Spacing[2] = Fields_Spacing[2];
    Fields->Origin[0] = Fields_Origin[0];
    Fields->Origin[1] = Fields_Origin[1];
    Fields->Origin[2] = Fields_Origin[2];

    printf("  Fields exponentiation\n");
    // Velocity fields: integrate (exponentiate) the log field to get both
    // the forward (Ref->phase) and inverse (phase->Ref) displacement fields.
    for(i=0; i<config->Num_4DCT_phases; i++){
      Fields->Phase2Ref[i] = Field_exponentiation(Fields->Ref2Phase_log[i], Fields->GridSize, Fields->Spacing, Fields->Origin, 1);
      Fields->Ref2Phase[i] = Field_exponentiation(Fields->Ref2Phase_log[i], Fields->GridSize, Fields->Spacing, Fields->Origin, 0);
    }

  }
  else{   // Field type == Displacement

    Fields->Ref2Phase_log = NULL;

    for(i=0; i<config->Num_4DCT_phases; i++){
      printf("  Loading deformation field %d\n", i+1);

      sprintf(file_path, "./Fields/Field_phase%d_to_Ref.mhd", i+1);
      Fields->Phase2Ref[i] = Import_Def_Field(file_path, Fields_GridSize, Fields_Spacing, Fields_Origin);
      if(Fields->Phase2Ref[i] == NULL){
        Free_4D_Fields(Fields);
        return NULL;
      }

      sprintf(file_path, "./Fields/Field_Ref_to_phase%d.mhd", i+1);
      Fields->Ref2Phase[i] = Import_Def_Field(file_path, Fields_GridSize, Fields_Spacing, Fields_Origin);
      if(Fields->Ref2Phase[i] == NULL){
        Free_4D_Fields(Fields);
        return NULL;
      }
    }

    Fields->GridSize[0] = 3;
    Fields->GridSize[1] = Fields_GridSize[0];
    Fields->GridSize[2] = Fields_GridSize[1];
    Fields->GridSize[3] = Fields_GridSize[2];
    Fields->Spacing[0] = Fields_Spacing[0];
    Fields->Spacing[1] = Fields_Spacing[1];
    Fields->Spacing[2] = Fields_Spacing[2];
    Fields->Origin[0] = Fields_Origin[0];
    Fields->Origin[1] = Fields_Origin[1];
    Fields->Origin[2] = Fields_Origin[2];

  }

  printf("\n");

  return Fields;
}

// Frees a DATA_4D_Fields structure, including all per-phase field buffers.
// Safe to call on a partially-initialized structure (NULL entries skipped).
void Free_4D_Fields(DATA_4D_Fields *Fields){
  if(Fields == NULL) return;

  int i;
  for(i=0; i<Fields->Nbr_Fields; i++){
    if(Fields->Phase2Ref[i] != NULL) free(Fields->Phase2Ref[i]);
    if(Fields->Ref2Phase[i] != NULL) free(Fields->Ref2Phase[i]);
  }
  if(Fields->Ref2Phase_log != NULL){
    for(i=0; i<Fields->Nbr_Fields; i++){
      if(Fields->Ref2Phase_log[i] != NULL) free(Fields->Ref2Phase_log[i]);
    }
  }
  if(Fields->Phase2Ref != NULL) free(Fields->Phase2Ref);
  if(Fields->Ref2Phase != NULL) free(Fields->Ref2Phase);
  if(Fields->Ref2Phase_log != NULL) free(Fields->Ref2Phase_log);
  return;
}

// Frees the 4D-CT phase array. Phases > 0 only borrow the conversion tables
// owned by CT[0], so their pointers are NULLed before Free_CT_DATA to avoid
// a double free.
// NOTE(review): CT[i] is dereferenced in the i != 0 branch BEFORE the
// "if(CT[i] != NULL)" check below — if Import_4DCT fails mid-loop, later
// entries are NULL and this crashes. The NULLing block should be guarded
// by the same CT[i] != NULL test; confirm and fix.
void Free_4DCT(DATA_CT **CT, int Nbr_phases){
  if(CT == NULL) return;

  int i;
  for(i=0; i<Nbr_phases; i++){
    if(i != 0){
      CT[i]->Conversion_HU_Density = NULL;
      CT[i]->Conversion_Densities = NULL;
      CT[i]->Conversion_HU_Material = NULL;
      CT[i]->Conversion_Density_Material = NULL;
      CT[i]->Conversion_Material_labels = NULL;
    }
    if(CT[i] != NULL) Free_CT_DATA(CT[i]);
  }
  free(CT);
  return;
}
raytracing.c
#include "raytracing.h"

// ---------------------------------------------------------------------------
// Custom pooled allocator: recycled blocks are kept in a hash-indexed
// "malloc_array" (keyed by size hash) and live blocks in "free_array"
// (keyed by pointer hash), all guarded by a single OpenMP lock.
// ---------------------------------------------------------------------------

// Pool-backed malloc: reuses a recycled block of the same size if available.
// NOTE(review): the underlying malloc in init_memory_block is unchecked, so
// this can return a block whose ptr is NULL on OOM.
void *custom_malloc(size_t size) {
    void *tmp = get_or_free_memory(size, 0, 0);
    return tmp;
}

// Pool-backed calloc: allocate then zero. NOTE(review): num*size may overflow
// and tmp is not NULL-checked before memset.
void *custom_calloc(size_t num, size_t size) {
    void *tmp = custom_malloc(num * size);
    memset(tmp, 0, num * size);
    return tmp;
}

// Returns a block to the pool (it is recycled, not released to the OS).
void custom_free(void *ptr) {
    get_or_free_memory(0, ptr, 1);
}

// djb2 string hash.
unsigned long hash(unsigned char *str) {
    // http://www.cse.yorku.ca/~oz/hash.html
    unsigned long hash = 5381;
    int c;

    while (c = *str++)  /* assignment in condition is intended (djb2 idiom) */
        hash = ((hash << 5) + hash) + c; /* hash * 33 + c */

    return hash;
}

// Hash a pointer into [0, 4096) by formatting it as text and hashing that.
unsigned long hash_ptr(void *ptr) {
    char str[16];  // NOTE(review): "%p" may need >15 chars on 64-bit — verify
    sprintf(str, "%p", ptr);
    return hash(str)%4096;
}

// Hash a size into [0, 256).
unsigned long hash_int(size_t num) {
    char str[16];
    sprintf(str, "%zx", num);
    return hash(str)%256;
}

// Allocate an empty register: 256 size-hash buckets (recycled blocks) and
// 4096 pointer-hash buckets (live blocks).
struct memory_register *init_memory_register() {
    struct memory_register *mem_reg = calloc(1, sizeof(struct memory_register));
    mem_reg->malloc_array = calloc(256, sizeof(struct memory_block *));
    mem_reg->free_array = calloc(4096, sizeof(struct memory_block *));
    return mem_reg;
}

// Release every block in both bucket arrays, then the register itself.
void destroy_memory_register(struct memory_register *mem_reg) {
    int ctr;
    for (ctr = 0; ctr < 256; ++ctr) {
        if (*(mem_reg->malloc_array + ctr) != 0) {
            destroy_memory_block(*(mem_reg->malloc_array + ctr));
        }
    }
    for (ctr = 0; ctr < 4096; ++ctr) {
        if (*(mem_reg->free_array + ctr) != 0) {
            destroy_memory_block(*(mem_reg->free_array + ctr));
        }
    }
    free(mem_reg->malloc_array);
    free(mem_reg->free_array);
    free(mem_reg);
}

// Allocate a block descriptor plus its payload; hashes are precomputed.
struct memory_block *init_memory_block(size_t size) {
    struct memory_block *mem_blk = malloc(sizeof(struct memory_block));
    mem_blk->size = size;
    mem_blk->ptr = malloc(size);  // NOTE(review): unchecked allocation
    mem_blk->sizehash = (int) hash_int(size);
    mem_blk->ptrhash = (int) hash_ptr(mem_blk->ptr);
    mem_blk->next = 0;
    return mem_blk;
}

// Recursively free a chain of blocks and their payloads.
void destroy_memory_block(struct memory_block *mem_blk) {
    if (mem_blk->next != 0) {
        destroy_memory_block(mem_blk->next);
    }
    free(mem_blk->ptr);
    free(mem_blk);
}

// Single entry point of the allocator. zero_for_get: 0 = allocate,
// positive = free, negative = tear the whole pool down.
// NOTE(review): the lazy init of mem_reg/memorylock happens BEFORE the lock
// is taken, so two threads racing through the first call can both initialize
// — confirm all first use is single-threaded or fix with a critical section.
void *get_or_free_memory(size_t size, void *ptr, int zero_for_get) {
    // negative zero_for_get to destroy all
    static struct memory_register *mem_reg = 0;
    static omp_lock_t memorylock;
    void *ptr_n = 0;
    if (mem_reg == 0) {
        mem_reg = init_memory_register();
        omp_init_lock(&memorylock);
    }

    omp_set_lock(&memorylock);
    // free allocated memory if zero_for_get is positive
    if (zero_for_get > 0) {
        free_memory(ptr, mem_reg);
    }
    if (zero_for_get == 0) {
        ptr_n = malloc_memory(size, mem_reg);
    }
    omp_unset_lock(&memorylock);

    if (mem_reg != 0 && zero_for_get < 0) {
        destroy_memory_register(mem_reg);
        mem_reg = 0;
        omp_destroy_lock(&memorylock);
    }
    return ptr_n;
}

// Move the block owning ptr from the live table (free_array) to the recycle
// table (malloc_array). NOTE(review): if ptr was never registered, the
// "while (mem_blk->ptr != ptr)" walk dereferences NULL — no defensive check.
static void free_memory(void *ptr, struct memory_register *mem_reg) {
    // find block
    int hash_p = (int) hash_ptr(ptr);
    struct memory_block *mem_blk = *(mem_reg->free_array + hash_p);
    struct memory_block *mem_prv = 0;
    while (mem_blk->ptr != ptr) {
        mem_prv = mem_blk;
        mem_blk = mem_blk->next;
    }

    // remove block from free_array, zero memory
    if (mem_prv != 0) {
        mem_prv->next = mem_blk->next;
    } else {
        *(mem_reg->free_array + hash_p) = mem_blk->next;
        // handles case when next is null also
    }
    mem_blk->next = 0;

    // move to appropriate location in malloc_array
    struct memory_block *m_mem_blk =
        *(mem_reg->malloc_array + mem_blk->sizehash);
    // add to the beginning of malloc_array linked list
    if (m_mem_blk != 0) mem_blk->next = m_mem_blk;
    *(mem_reg->malloc_array + mem_blk->sizehash) = mem_blk;
}

// Obtain a block of exactly `size` bytes: reuse a recycled one if present,
// otherwise allocate fresh; then register it in the live table.
static void *malloc_memory(size_t size, struct memory_register *mem_reg) {
    // find block
    int hash_i = (int) hash_int(size);
    struct memory_block *mem_blk = *(mem_reg->malloc_array + hash_i);
    struct memory_block *mem_prv = 0;

    // find memory block of appropriate size since
    // multiple sizes may map to the same hash
    while (mem_blk != 0 && mem_blk->size != size) {
        mem_prv = mem_blk;
        mem_blk = mem_blk->next;
    }

    if (mem_blk == 0) {
        mem_blk = init_memory_block(size);
    } else {
        // disconnect mem_blk from malloc_array
        if (mem_prv == 0) {
            *(mem_reg->malloc_array + hash_i) = mem_blk->next;
        } else {
            mem_prv->next = mem_blk->next;
        }
        mem_blk->next = 0;
    }
    assert(mem_blk->next == 0);

    // add mem_blk to the beginning of appropriate link of free_array
    // NOTE(review): when the bucket is non-empty the block is actually
    // inserted AFTER the head, not at the beginning as the comment says —
    // functionally harmless, but comment and code disagree.
    struct memory_block *mem_blk_f = *(mem_reg->free_array + mem_blk->ptrhash);
    if (mem_blk_f == 0) {
        *(mem_reg->free_array + mem_blk->ptrhash) = mem_blk;
    } else {
        mem_blk->next = mem_blk_f->next;
        mem_blk_f->next = mem_blk;
    }
    return mem_blk->ptr;
}

// Destroys the entire allocator pool (see get_or_free_memory, negative mode).
void release_all_blocks() {
    get_or_free_memory(0, 0, -1);
}

// Monotonically increasing id generator (used to tag ribbon nodes).
// NOTE(review): the static counter is not synchronized — racy under OpenMP.
int counter() {
    static int ctr = 0;
    return ctr++;
}

// ---------------------------------------------------------------------------
// Ray-ribbon construction and receiver bookkeeping
// ---------------------------------------------------------------------------

// Allocate a zeroed linked-list node for receiver ray ribbons.
struct receiver_ray_ribbon_ll_node *init_receiver_ray_ribbon_ll_node() {
    struct receiver_ray_ribbon_ll_node *rnll =
        custom_calloc(1, sizeof(struct receiver_ray_ribbon_ll_node));
    return rnll;
}

// Build a receiver-side ribbon by refining `ribbon` toward receiver rx using
// the environment's reflectors. Returns NULL (and cleans up) if refinement
// fails.
struct receiver_ray_ribbon *init_receiver_ray_ribbon(
    struct receiver *rx, const struct ray_ribbon *ribbon,
    struct environment *env) {
    struct receiver_ray_ribbon *rrbn_new =
        custom_calloc(1, sizeof(struct receiver_ray_ribbon));
    const struct perfect_reflector **prconst =
        (const struct perfect_reflector **) env->prarray;
    rrbn_new->ribbon = refine_ray_ribbon_image(ribbon->start_tx, ribbon,
                                               rx, prconst);
    rrbn_new->start_tx = ribbon->start_tx;
    if (rrbn_new->ribbon == 0) {
        destroy_receiver_ray_ribbon(rrbn_new);
        return 0;
    }
    return rrbn_new;
}

// Append a refined copy of rb1 to rx's ribbon list unless a ribbon of the
// same reflection type is already present. Returns true iff a ribbon was
// added.
bool populate_if_ray_ribbon_doesnt_exist(const struct ray_ribbon *rb1,
                                         struct receiver *rx,
                                         struct environment *env) {
    // if type is the same declare to be the same
    struct receiver_ray_ribbon_ll_node *rnll = rx->rlln;
    struct receiver_ray_ribbon_ll_node *rnprev = 0;
    while (rnll != 0) {
        assert(rnll->rrbn != 0);
        assert(rnll->rrbn->ribbon != 0);
        if (check_same_type(rb1, rnll->rrbn->ribbon)) return false;
        rnprev = rnll;
        rnll = rnll->next;
    }

    struct receiver_ray_ribbon *rrbn_new =
        init_receiver_ray_ribbon(rx, rb1, env);
    if (!rrbn_new) return false;

    // add rb1 to rx (append at tail, or create the head)
    if (rnprev == 0) {
        rx->rlln = init_receiver_ray_ribbon_ll_node();
        rx->rlln->rrbn = rrbn_new;
    } else {
        rnprev->next = init_receiver_ray_ribbon_ll_node();
        rnprev->next->rrbn = rrbn_new;
    }
    return true;
}

// For every receiver, scan all transmitter path arrays and register any
// ribbon type the receiver does not have yet. No-op when paths are current.
// Relies on NULL-terminated ribbon/path arrays (calloc'd sentinels).
void populate_receiver_ray_ribbons(struct environment *env) {
    if (env->tx_paths_updated_rx_paths_updated) {
        return;
    }
    fprintf(stderr, "Updating receiver ribbons\n");
    #pragma omp parallel for shared(env)
    for (int ctrrx = 0; ctrrx < env->num_receivers; ++ctrrx) {
        struct receiver *rx = *(env->receivers_array + ctrrx);
        int ctrprx = 0;

        int ctrtx = 0;
        struct ray_ribbon_array *rbnarr = *(env->tx_paths + ctrtx);
        while (rbnarr != 0) {
            int ctrp = 0;
            struct ray_ribbon *rbn = *(rbnarr->ribbons);
            while (rbn != 0) {
                rbn->start_tx = *(env->transmitters_array + ctrtx);
                bool added = populate_if_ray_ribbon_doesnt_exist(
                    rbn, rx, env);
                if (added) fprintf(stderr, "Added ribbon\n");
                ++ctrp;
                rbn = *(rbnarr->ribbons + ctrp);
            }
            ++ctrtx;
            rbnarr = *(env->tx_paths + ctrtx);
        }
    }
    env->tx_paths_updated_rx_paths_updated = true;
}

// Refresh spatial parameters and signal buffers for every receiver.
// NOTE(review): the pragma below is missing "omp" — "#pragma parallel for"
// is an unknown pragma and is silently ignored, so this loop runs serially
// (and "private(ctrrx)" would be invalid anyway since ctrrx is declared in
// the for-init). Should read "#pragma omp parallel for shared(env)".
void update_all_receiver_ray_ribbons(struct environment *env) {
    #pragma parallel for private(ctrrx) shared(env)
    for (int ctrrx = 0; ctrrx < env->num_receivers; ++ctrrx) {
        struct receiver *rx = *(env->receivers_array + ctrrx);
        update_receiver_ray_ribbons(rx, env);
        update_receiver_ray_ribbons_signal_buffer(rx, env);
    }
}

// Re-refine each of rx's ribbons against the current tx/rx positions.
// Ribbons that can no longer be refined are marked NULL (pruned later by
// update_receiver_ray_ribbons_signal_buffer). Always returns true.
bool update_receiver_ray_ribbons(struct receiver *rx,
                                 struct environment *env) {
    // this function updates the ray ribbon spatial parameters
    // based on the spatial locations
    // of the transmitter and the receiver, returns true
    struct receiver_ray_ribbon_ll_node *rlln = rx->rlln;
    const struct perfect_reflector **prconst =
        (const struct perfect_reflector **) env->prarray;
    while (rlln != 0) {
        struct ray_ribbon *refined_ribbon = 0;
        if (rlln->rrbn->ribbon) {
            refined_ribbon = refine_ray_ribbon_image(
                rlln->rrbn->ribbon->start_tx,
                rlln->rrbn->ribbon, rx, prconst);
            destroy_ray_ribbon(rlln->rrbn->ribbon);
        }

        if (refined_ribbon != 0) {
            // NOTE(review): use-after-free — rlln->rrbn->ribbon was just
            // destroyed above, yet its start_tx is read here. The start_tx
            // should be captured before destroy_ray_ribbon (or taken from
            // rlln->rrbn->start_tx). Confirm and fix.
            refined_ribbon->start_tx = rlln->rrbn->ribbon->start_tx;
            refined_ribbon->end_rx = rx;
            rlln->rrbn->ribbon = refined_ribbon;
            update_receiver_ribbon_delay_dopplers(rlln->rrbn, env);
        } else {
            // tx path update is triggered whenever ray is lost
            // env->tx_paths_updated = false;
            rlln->rrbn->ribbon = 0;
        }
        rlln = rlln->next;
    }
    return true;
}

// Prune dead ribbons (ribbon == NULL), enqueue the current transmit signal on
// each live ribbon, and drop buffered signals that newer arrivals supersede.
void update_receiver_ray_ribbons_signal_buffer(struct receiver *rx,
                                               struct environment *env) {
    // this function updates the buffer just before readout and removes
    // all outdated signals

    // code to update buffers
    struct receiver_ray_ribbon_ll_node *rlln = rx->rlln;
    struct receiver_ray_ribbon_ll_node *rllnprev = 0;
    while (rlln != 0) {
        /* if (rlln->rrbn->ribbon == 0 && rlln->rrbn->signal->next == 0 && */
        /*     rlln->rrbn->signal->receiver_read && */
        /*     rlln->rrbn->signal->transmit_time */
        /*     + rlln->rrbn->signal->delay + */
        /*     env->delta_time < env->time) { */
        if (rlln->rrbn->ribbon == 0) {
            // unlink and destroy the dead node, keeping the list consistent
            if (rllnprev != 0) {
                rllnprev->next = rlln->next;
                rlln->next = 0;
                destroy_receiver_ray_ribbon_ll_node(rlln);
                rlln = rllnprev->next;
            } else {
                rx->rlln = rlln->next;
                rlln->next = 0;
                destroy_receiver_ray_ribbon_ll_node(rlln);
                rlln = rx->rlln;
            }
        } else {
            // add current signal first
            struct signal_buffer *signal = rlln->rrbn->signal;
            if (signal == 0) {
                signal = init_signal_buffer();
                assert(rlln->rrbn->start_tx != 0);
                signal->signal = rlln->rrbn->start_tx->baseband_signal;
                signal->transmit_time = env->time;
                signal->delay = rlln->rrbn->delay;
                rlln->rrbn->signal = signal;
            } else {
                // append at the tail of the buffer list
                while(signal->next != 0) {
                    signal = signal->next;
                }
                signal->next = init_signal_buffer();
                signal->next->delay = rlln->rrbn->delay;
                signal->next->signal =
                    rlln->rrbn->start_tx->baseband_signal;
                signal->next->transmit_time = env->time;
            }

            // remove signal if there are newer signals in the buffer
            while (rlln->rrbn->signal != 0 &&
                   rlln->rrbn->signal->next != 0 &&
                   rlln->rrbn->signal->next->transmit_time
                   + rlln->rrbn->signal->next->delay < env->time) {
                rlln->rrbn->signal =
                    destroy_signal_buffer_first(rlln->rrbn->signal);
            }
            rllnprev = rlln;
            rlln = rlln->next;
        }
    }
}

// Free one list node together with its ribbon payload.
void destroy_receiver_ray_ribbon_ll_node(struct receiver_ray_ribbon_ll_node *p) {
    destroy_receiver_ray_ribbon(p->rrbn);
    custom_free(p);
}

// Free a receiver ribbon, its buffered signals, and its refined ray ribbon.
void destroy_receiver_ray_ribbon(struct receiver_ray_ribbon *rrbn) {
    destroy_signal_buffer(rrbn->signal);
    if (rrbn->ribbon != 0) {
        destroy_ray_ribbon(rrbn->ribbon);
    }
    custom_free(rrbn);
}

// Allocate a zeroed signal-buffer node.
struct signal_buffer *init_signal_buffer() {
    struct signal_buffer *sgn = custom_calloc(1, sizeof(struct signal_buffer));
    return sgn;
}

// Recursively free a signal-buffer chain. NULL-safe.
void destroy_signal_buffer(struct signal_buffer *sgn) {
    if (sgn == 0) return;
    if (sgn->next != 0) {
        destroy_signal_buffer(sgn->next);
    }
    custom_free(sgn);
}

// Pop and free the head of a signal-buffer chain; returns the new head.
struct signal_buffer *destroy_signal_buffer_first(struct signal_buffer *sgn) {
    struct signal_buffer *sgnnext = sgn->next;
    sgn->next = 0;
    destroy_signal_buffer(sgn);
    return sgnnext;
}

// Allocate a zeroed ribbon array with capacity `number` (entries are NULL
// sentinels until filled).
struct ray_ribbon_array *init_ray_ribbon_array(int number) {
    struct ray_ribbon_array *rarr =
        custom_calloc(1, sizeof(struct ray_ribbon_array));
    rarr->ribbons = custom_calloc(number, sizeof(struct ray_ribbon *));
    rarr->max_len = number;
    return rarr;
}

// Convenience wrapper: launch rays on a full sphere with num_divs angular
// divisions in each coordinate.
void populate_ray_ribbon_array(struct transmitter *tx,
                               const struct perfect_reflector **ref_arr,
                               struct ray_ribbon_array *rarr,
                               int num_divs, int num_ref,
                               bool single_type) {
    populate_ray_ribbon_array_long(tx, ref_arr, rarr, num_ref,
                                   -PI / 2, PI / 2, PI / num_divs,
                                   0, 2 * PI, 2 * PI / num_divs,
                                   single_type);
}

// Enumerate launch angles over [phi_start, phi_end) x [thet_start, thet_end)
// on the given grid, pack them as phi + i*theta, and trace them all.
void populate_ray_ribbon_array_long(struct transmitter *tx,
                                    const struct perfect_reflector **ref_arr,
                                    struct ray_ribbon_array *rarr,
                                    int num_ref,
                                    const double phi_start,
                                    const double phi_end,
                                    const double phi_delta,
                                    const double thet_start,
                                    const double thet_end,
                                    const double thet_delt,
                                    bool single_type) {
    int num_points = (1 + floor((phi_end - phi_start) / phi_delta))
        * (1 + floor((thet_end - thet_start) / thet_delt));
    if (_RAYTRACING_DEBUG) fprintf(stderr, "Number is %d\n", num_points);
    // angles are encoded as complex numbers: real = phi, imag = theta
    double complex *angles =
        custom_malloc(num_points * sizeof(double complex));
    int ctr = 0;
    double phi, theta;
    for (phi = phi_start; phi < phi_end; phi += phi_delta) {
        for (theta = thet_start; theta < thet_end; theta += thet_delt) {
            *(angles + ctr) = phi + I * theta;
            ++ctr;
        }
    }
    populate_ray_ribbon_array_full_copy(tx, ref_arr, num_ref, ctr, angles,
                                        rarr, single_type);
    custom_free(angles);
}

// Trace all launch angles in parallel. Each thread reuses one scratch ribbon
// (6 chained nodes); ribbons that hit the destination are deep-copied into
// rarr under a critical section.
void populate_ray_ribbon_array_full_copy(const struct transmitter *tx,
                                         const struct perfect_reflector **ref_arr,
                                         int num_ref,
                                         int num_points,
                                         const double complex *angles,
                                         struct ray_ribbon_array *rarr,
                                         bool single_type) {
    int ctr = 0;
    #pragma omp parallel shared(rarr, angles)
    {
        // per-thread scratch chain, reused across iterations
        struct ribbon_node *rn = 0;
        struct ray_ribbon *rb = 0;
        {
            rn = init_chain_of_ribbon_nodes(6);
            rb = init_ray_ribbon(rn);
            rb->start_tx = tx;
        }
        #pragma omp for private(ctr)
        for (ctr = 0; ctr < num_points; ++ctr) {
            double phi, theta;
            phi = creal(*(angles + ctr));
            theta = cimag(*(angles + ctr));
            cblas_dcopy(3, tx->gn->smm->position, 1, rn->current->point, 1);
            double direction[3] = {cos(phi), sin(phi) * cos(theta),
                                   sin(phi) * sin(theta)};
            cblas_dcopy(3, direction, 1, rn->current->unit_direction, 1);
            bool hit_des = process_vertical_chain_nomalloc(rn, ref_arr,
                                                           num_ref);
            if (hit_des) {
                #pragma omp critical
                add_ray_ribbon_copy(rarr, rb, single_type);
            }
        }
        destroy_ray_ribbon(rb);
    }
}

// Wrap an existing node chain in a new ray_ribbon (takes ownership of rn).
struct ray_ribbon *init_ray_ribbon(struct ribbon_node *rn) {
    struct ray_ribbon *rb = custom_calloc(1, sizeof(struct ray_ribbon));
    rb->head = rn;
    return rb;
}

// Allocate a zeroed ribbon node with its embedded ray and a fresh id.
struct ribbon_node *init_ribbon_node() {
    struct ribbon_node *rn = custom_calloc(1, sizeof(struct ribbon_node));
    rn->current = custom_calloc(1, sizeof(struct half_infinite_ray));
    rn->ctr = counter();
    rn->surface_index = -1;
    return rn;
}

// Deep-copy a single node (the embedded ray is copied by value).
struct ribbon_node *init_ribbon_node_from_copy(const struct ribbon_node *rn) {
    struct ribbon_node *rn_tmp = init_ribbon_node();
    rn_tmp->hit_destination_patch = rn->hit_destination_patch;
    rn_tmp->num_reflections = rn->num_reflections;
    rn_tmp->ctr = rn->ctr;
    rn_tmp->surface_index = rn->surface_index;
    *(rn_tmp->current) = *(rn->current);
    return rn_tmp;
}

// Build a node whose ray runs from pt1 toward pt2 (direction normalized,
// length recorded).
struct ribbon_node *init_ribbon_node_from_points(const double *pt1,
                                                 const double *pt2) {
    struct ribbon_node *rn_tmp = init_ribbon_node();
    cblas_dcopy(3, pt1, 1, rn_tmp->current->point, 1);
    cblas_dcopy(3, pt2, 1, rn_tmp->current->end_pt, 1);
    double diff_v[3];
    diff(pt1, pt2, diff_v);
    rn_tmp->current->length = normalize_unit_vector(diff_v);
    cblas_dcopy(3, diff_v, 1, rn_tmp->current->unit_direction, 1);
    return rn_tmp;
}

// Build a downward-linked chain of `length_of_node` fresh nodes; node k has
// num_reflections = k.
struct ribbon_node *init_chain_of_ribbon_nodes(int length_of_node) {
    int ctr = 0;
    struct ribbon_node *rn = 0;
    struct ribbon_node *rn_tmp = 0;
    struct ribbon_node *rn_prev = 0;
    while (ctr < length_of_node) {
        rn_tmp = init_ribbon_node();
        rn_tmp->num_reflections = ctr;
        if (rn == 0) {
            rn = rn_tmp;
        } else {
            rn_prev->down = rn_tmp;
        }
        rn_prev = rn_tmp;
        ++ctr;
    }
    return rn;
}

// Deep-copy an entire node chain.
struct ribbon_node *copy_chain_of_ribbon_nodes(const struct ribbon_node *rn) {
    struct ribbon_node *rn_new = 0;
    struct ribbon_node *rn_tmp = 0;
    struct ribbon_node *rn_tmp_prev = 0;
    while (rn != 0) {
        rn_tmp = init_ribbon_node();
        rn_tmp->hit_destination_patch = rn->hit_destination_patch;
        rn_tmp->num_reflections = rn->num_reflections;
        rn_tmp->ctr = rn->ctr;
        rn_tmp->surface_index = rn->surface_index;
        *(rn_tmp->current) = *(rn->current);
        if (rn_new == 0) {
            rn_new = rn_tmp;
        } else {
            rn_tmp_prev->down = rn_tmp;
        }
        rn = rn->down;
        rn_tmp_prev = rn_tmp;
    }
    return rn_new;
}

// Deep-copy a node chain, stopping after the first node that hit the
// destination patch (inclusive).
struct ribbon_node *copy_chain_of_ribbon_nodes_till_dest(
    const struct ribbon_node *rn) {
    struct ribbon_node *rn_new = 0;
    struct ribbon_node *rn_tmp = 0;
    struct ribbon_node *rn_tmp_prev = 0;
    bool isfinal = false;
    while (rn != 0 && !isfinal) {
        rn_tmp = init_ribbon_node();
        rn_tmp->hit_destination_patch = rn->hit_destination_patch;
        rn_tmp->num_reflections = rn->num_reflections;
        rn_tmp->ctr = rn->ctr;
        rn_tmp->surface_index = rn->surface_index;
        *(rn_tmp->current) = *(rn->current);
        if (rn_new == 0) {
            rn_new = rn_tmp;
        } else {
            rn_tmp_prev->down = rn_tmp;
        }
        isfinal = rn->hit_destination_patch;
        rn = rn->down;
        rn_tmp_prev = rn_tmp;
    }
    return rn_new;
}

// Deep-copy a ribbon (full chain, or truncated at the destination hit when
// till_dest is true); scalar channel parameters are copied as-is.
struct ray_ribbon *copy_ray_ribbon(const struct ray_ribbon *rb,
                                   bool till_dest) {
    struct ray_ribbon *rbnew = custom_malloc(sizeof(struct ray_ribbon));
    if (till_dest) {
        rbnew->head = copy_chain_of_ribbon_nodes_till_dest(rb->head);
    } else {
        rbnew->head = copy_chain_of_ribbon_nodes(rb->head);
    }
    rbnew->start_tx = rb->start_tx;
    rbnew->end_rx = rb->end_rx;
    rbnew->delay = rb->delay;
    rbnew->doppler = rb->doppler;
    rbnew->integrated_doppler_phase = rb->integrated_doppler_phase;
    rbnew->gain = rb->gain;
    rbnew->reflection_phase = rb->reflection_phase;
    return rbnew;
}

// Free a ribbon and its node chain. NULL-safe.
void destroy_ray_ribbon(struct ray_ribbon *rb) {
    if (rb == 0) return;
    destroy_ray_ribbon_nodes(rb);
    custom_free(rb);
}

// Free only the node chain of a ribbon, leaving the ribbon struct alive.
void destroy_ray_ribbon_nodes(struct ray_ribbon *rb) {
    if (rb == 0) return;
    struct ribbon_node *rn = rb->head;
    destroy_chain_of_ribbon_nodes(rn);
    rb->head = 0;
}

// Free an entire ribbon array. Relies on a NULL sentinel entry; if the array
// is completely full this scan reads past the end (max_len is not consulted).
void destroy_ray_ribbon_array(struct ray_ribbon_array *array) {
    if (array == 0) return;
    int ctr = 0;
    while (*(array->ribbons + ctr) != NULL) {
        destroy_ray_ribbon_nodes(*(array->ribbons + ctr));
        custom_free(*(array->ribbons + ctr));
        ++ctr;
    }
    custom_free(array->ribbons);
    custom_free(array);
}

// Like destroy_ray_ribbon_array but leaves ribbons[0] untouched —
// presumably the caller retains ownership of the first ribbon; confirm.
void destroy_ray_ribbon_array_all_but_first(struct ray_ribbon_array *array) {
    if (array == 0) return;
    int ctr = 1;
    while (*(array->ribbons + ctr) != NULL) {
        destroy_ray_ribbon_nodes(*(array->ribbons + ctr));
        custom_free(*(array->ribbons + ctr));
        ++ctr;
    }
    custom_free(array->ribbons);
    custom_free(array);
}

// Free the ribbons and the ribbon-pointer array, resetting the (caller-owned)
// array struct to an empty state.
void destroy_ray_ribbon_array_ribbons(struct ray_ribbon_array *array) {
    int ctr = 0;
    if (array == 0) return;
    while (*(array->ribbons + ctr) != NULL) {
        destroy_ray_ribbon(*(array->ribbons + ctr));
        ++ctr;
    }
    custom_free(array->ribbons);
    array->max_len = 0;
    array->current_len = 0;
    array->ribbons = 0;
}

// Recursively free a node chain (each node plus its embedded ray). NULL-safe.
void destroy_chain_of_ribbon_nodes(struct ribbon_node *rn) {
    if (rn == NULL) return;
    if (rn->current != NULL) custom_free(rn->current);
    struct ribbon_node *rndown = rn->down;
    custom_free(rn);
    destroy_chain_of_ribbon_nodes(rndown);
}

// Free a single node (does NOT follow the down link). NULL-safe.
void destroy_ribbon_node(struct ribbon_node *rn) {
    if (rn == NULL) return;
    if (rn->current != NULL) custom_free(rn->current);
    custom_free(rn);
}

// True when two ribbons underwent the same reflection sequence.
// NOTE(review): body continues beyond this chunk; kept verbatim, unchanged.
bool check_same_type(const struct ray_ribbon *ray_rb1,
                     const struct ray_ribbon *ray_rb2) {
    struct ribbon_node *rn1 =
ray_rb1->head; struct ribbon_node *rn2 = ray_rb2->head; // same type if same transmitter and same reflectors if (ray_rb1->start_tx != ray_rb2->start_tx) return false; bool hit1, hit2; while(rn1 != NULL && rn2 != NULL) { hit1 = rn1->hit_destination_patch; hit2 = rn2->hit_destination_patch; if (rn1->surface_index != rn2->surface_index) return false; rn1 = rn1->down; rn2 = rn2->down; } assert(hit1 || hit2); return (hit1 == hit2); //return (rn1 == NULL && rn2 == NULL); } bool add_ray_ribbon(struct ray_ribbon_array *array, struct ray_ribbon *rb, bool single_type) { if (rb == NULL) return false; if (single_type) { int ctr = 0; // check for type here while (*(array->ribbons + ctr) != 0) { if(check_same_type(*(array->ribbons + ctr), rb)) { return false; } ++ctr; } } *(array->ribbons + array->current_len) = rb; array->current_len++; // this enforces null termination *(array->ribbons + array->current_len) = 0; return true; } bool add_ray_ribbon_copy(struct ray_ribbon_array *array, const struct ray_ribbon *rb, bool single_type) { if (rb == NULL) return false; if (single_type) { int ctr = 0; // check for type here while (*(array->ribbons + ctr) != 0) { if(check_same_type(*(array->ribbons + ctr), rb)) { return false; } ++ctr; } } *(array->ribbons + array->current_len) = copy_ray_ribbon(rb, true); array->current_len++; // this enforces null termination *(array->ribbons + array->current_len) = 0; return true; } double complex compute_intersection(const struct half_infinite_ray *hr, const struct perfect_reflector *pr) { double t, sgn; double diff[3]; cblas_dcopy(3, hr->point, 1, diff, 1); cblas_daxpy(3, -1, pr->center_point, 1, diff, 1); t = -cblas_ddot(3, diff, 1, pr->unit_normal, 1) /cblas_ddot(3, hr->unit_direction, 1, pr->unit_normal, 1); //if (_RAYTRACING_DEBUG) fprintf(stderr, "t obtained is %lf\n", t); // check if t lies within the bounds of the patch if (t < INFINITY) { // Verify the signs cblas_daxpy(3, t, hr->unit_direction, 1, diff, 1); double lengtht = cblas_ddot(3, 
diff, 1, pr->unit_length_normal, 1); double widtht = cblas_ddot(3, diff, 1, pr->unit_width_normal, 1); if (fabs(lengtht) > pr->length / 2 || fabs(widtht) > pr->width / 2 || t < 0) { t = INFINITY; } } sgn = cblas_ddot(3, hr->unit_direction, 1, pr->unit_normal, 1); return t + I * sgn; } bool process_vertical_chain_nomalloc(struct ribbon_node *rn, const struct perfect_reflector **pr, int num_reflections) { // this function computes whether a ray can hit the // destination after a max num_reflections int ctr = 0, ctrindex = -1, num_reflectors = 0; double tmin = INFINITY, sgn = -1; const struct perfect_reflector *prsurf = *(pr + ctr); while(prsurf != NULL) { double complex dbl = compute_intersection(rn->current, prsurf); rn->current->length = creal(dbl); if (creal(dbl) < tmin && ctr != rn->surface_index) { tmin = creal(dbl); sgn = cimag(dbl); ctrindex = ctr; } ++ctr; prsurf = *(pr + ctr); } num_reflectors = ctr; if (sgn>0 || tmin>1e5) return false; if (ctrindex == num_reflectors - 1) { rn->hit_destination_patch = true; return true; } else { rn->hit_destination_patch = false; } if (rn->num_reflections > num_reflections) return false; // only case remaining is if there is intersection with // reflector and number of reflections is small // update starting point struct ribbon_node *rn_next = rn->down; cblas_dcopy(3, rn->current->point, 1, rn_next->current->point, 1); cblas_daxpy(3, tmin, rn->current->unit_direction, 1, rn_next->current->point, 1); rn_next->surface_index = ctrindex; // update ending point of previous ray cblas_dcopy(3, rn_next->current->point, 1, rn->current->end_pt, 1); // next update direction cblas_dcopy(3, rn->current->unit_direction, 1, rn_next->current->unit_direction, 1); const struct perfect_reflector *prsurface = pr[ctrindex]; double factor = -2*cblas_ddot(3, rn->current->unit_direction, 1, prsurface->unit_normal, 1); cblas_daxpy(3, factor, prsurface->unit_normal, 1, rn_next->current->unit_direction, 1); return 
process_vertical_chain_nomalloc(rn_next, pr, num_reflections); } void print_ray_ribbon(const struct ray_ribbon *rb) { fprintf(stderr, "Printing rayribbon: \n\n"); struct ribbon_node *rn = rb->head; int ctr = 0; while(rn != NULL) { fprintf(stderr, "Level %d:\n", ctr); print_ribbon_node(rn); rn = rn->down; ++ctr; } } void print_receiver_ray_ribbon(const struct receiver_ray_ribbon *rb) { fprintf(stderr, "Printing receiver rayribbon: \n\n"); assert(rb->ribbon); struct ribbon_node *rn = rb->ribbon->head; int ctr = 0; while(rn != NULL) { fprintf(stderr, "Level %d:\n", ctr); print_ribbon_node(rn); rn = rn->down; ++ctr; } fprintf(stderr, "Delay doppler for receiver rayribbon are:", "%lf ns; %lf;\n", (10e9) * rb->delay, rb->doppler); } void print_ray_ribbon_flattened(const struct ray_ribbon *rb) { struct ribbon_node *rn = rb->head; int ctr = 0; while (rn != NULL) { fprintf(stderr, "(%lf, %lf, %lf) -- (%lf, %lf, %lf) ", rn->current->point[0], rn->current->point[1], rn->current->point[2], rn->current->end_pt[0], rn->current->end_pt[1], rn->current->end_pt[2]); fprintf(stderr, "Surface index: %d; ", rn->surface_index); rn = rn->down; ++ctr; } fprintf(stderr, "\n"); } void print_ray_ribbon_array(const struct ray_ribbon_array *rarr) { struct ray_ribbon * rb; rb = *(rarr->ribbons); int ctr = 0; while (rb != NULL) { fprintf(stderr, "Printing ribbon %d ", ctr); print_ray_ribbon_flattened(rb); ++ctr; rb = *(rarr->ribbons + ctr); } } void print_vertical_strip(const struct ribbon_node *rn) { int ctr = 0; while (rn != NULL) { fprintf(stderr, "Level %d\n", ctr++); print_ribbon_node(rn); rn = rn->down; } } void print_ribbon_node(const struct ribbon_node *rn) { if (rn == NULL) return; fprintf(stderr, "Starting point: "); int ctr = 0; for(ctr = 0; ctr < 3; ++ctr) { fprintf(stderr, "%lf ", rn->current->point[ctr]); } fprintf(stderr, "Unit direction: "); for(ctr = 0; ctr < 3; ++ctr) { fprintf(stderr, "%lf ", rn->current->unit_direction[ctr]); } fprintf(stderr, "Ending point: "); for(ctr = 0; 
ctr < 3; ++ctr) {
    fprintf(stderr, "%lf ", rn->current->end_pt[ctr]);
}
fprintf(stderr, "Hit dest: %d, num reflec: %d, Surf index: %d",
        rn->hit_destination_patch, rn->num_reflections,
        rn->surface_index);
fprintf(stderr, "\n");
}

/* Number of nodes (segments) in a chain. */
int count_segments(const struct ribbon_node *rn) {
    int ctr = 0;
    while (rn != NULL) {
        rn = rn->down;
        ++ctr;
    }
    return ctr;
}

/* Recovers (phi, theta) from a unit direction vector; inverse of the
 * {cos(phi), sin(phi)cos(theta), sin(phi)sin(theta)} parameterisation used
 * when launching rays.
 * NOTE(review): atan(z/y) discards the sign of y (atan2 would keep the
 * quadrant); the sign of phi is patched up via the z component below —
 * TODO confirm this covers all quadrants callers rely on. */
void invert_spherical_angles(const double *unit_vector, double *phi,
                             double *thet) {
    *thet = atan(unit_vector[2]/unit_vector[1]);
    *phi = acos(unit_vector[0]);
    if (sin(*phi) * sin(*thet) / unit_vector[2] < 0 ) *phi = 2 * PI - (*phi);
}

/* Image-method refinement: given a template ribbon rb (tx -> reflectors ->
 * destination), mirrors the tx position across each reflector in turn
 * ("virtual points"), then walks backwards from the rx position to find the
 * exact specular reflection points.  Returns a newly built ribbon, or NULL
 * when some intersection falls off a reflector. */
struct ray_ribbon *refine_ray_ribbon_image(const struct transmitter *tx,
                                           const struct ray_ribbon *rb,
                                           const struct receiver *rx,
                                           const struct perfect_reflector **pr) {
    if (rb == 0) return 0;
    // first count number of reflectors
    int cnt = count_ribbon_nodes(rb->head);
    // allocate addresses for virtual points
    double **virtual_points = custom_malloc(cnt * sizeof(double *));
    double *zero_pt = custom_calloc(3, sizeof(double));
    struct ribbon_node **rnnodes = custom_calloc(cnt, sizeof(struct ribbon_node *));
    const struct perfect_reflector **prref = custom_calloc(
        cnt - 1, sizeof(struct perfect_reflector *));
    int ctr = 0;
    *virtual_points = custom_malloc(3 * sizeof(double));
    cblas_dcopy(3, tx->gn->smm->position, 1, *(virtual_points), 1);
    // compute reflected points
    struct ribbon_node *rn = rb->head;
    for (ctr = 0; ctr < cnt - 1; ++ctr) {
        *(rnnodes + ctr) = rn;
        *(virtual_points + ctr + 1) = custom_malloc(3 * sizeof(double));
        cblas_dcopy(3, *(virtual_points + ctr), 1,
                    *(virtual_points + ctr + 1), 1);
        int st_in = rn->down->surface_index;
        const struct perfect_reflector *prr = *(pr + st_in);
        *(prref + ctr) = prr;
        /* mirror the previous virtual point across reflector prr
         * (zero_pt: we only care about position, not velocity) */
        reflect(prr->center_point, prr->unit_normal, zero_pt,
                *(virtual_points + ctr + 1));
        rn = rn->down;
    }
    *(rnnodes + ctr) = rn;
    // now work backwards to get points of intersection
    double *ptprev = rx->gn->smm->position;
    bool validrayribbon = true;
    for (ctr = cnt - 1; ctr > 0; --ctr) { struct
ribbon_node *rn = init_ribbon_node_from_points( *(virtual_points + ctr), ptprev); rn->surface_index = (*(rnnodes + ctr))->surface_index; rn->num_reflections = (*(rnnodes + ctr))->num_reflections; double complex tsgn = compute_intersection( rn->current, *(prref + ctr - 1)); rn->current->length = creal(tsgn); cblas_dcopy(3, ptprev, 1, rn->current->end_pt, 1); if (creal(tsgn) > 1e5) { validrayribbon = false; ctr = 0; } else { cblas_daxpy(3, tsgn, rn->current->unit_direction, 1, rn->current->point, 1); *(rnnodes + ctr) = rn; ptprev = rn->current->point; } } // construct final ray ribbon struct ray_ribbon *rbfinal = 0; if (validrayribbon) { // construct ray ribbon struct ribbon_node *rn = init_ribbon_node_from_points(tx->gn->smm->position, ptprev); // init_ribbon_node(); struct ribbon_node *rninit = rn; rn->surface_index = -1; rn->num_reflections = 0; for (ctr = 1; ctr < cnt; ++ctr) { rn->down = *(rnnodes + ctr); rn = rn->down; rn->num_reflections = ctr; } rn->hit_destination_patch = true; rbfinal = init_ray_ribbon(rninit); rbfinal->start_tx = tx; rbfinal->end_rx = rx; } // destroy temp for (ctr = 0; ctr < cnt; ++ctr) { custom_free(*(virtual_points + ctr)); } custom_free(virtual_points); custom_free(prref); custom_free(rnnodes); custom_free(zero_pt); return rbfinal; } int count_ribbon_nodes(const struct ribbon_node *rn) { int cnt = 0; bool isfinal = false; while (!isfinal) { cnt++; isfinal = rn->hit_destination_patch; rn = rn->down; } return cnt; } long type_ray_ribbon(const struct ray_ribbon *rb) { int ctr=0; struct ribbon_node *rn = rb->head; while (rn != NULL) { ctr = MAX_SURFACES * ctr + rn->surface_index + 1; rn = rn->down; } return ctr; } struct ray_ribbon_array *throw_three_dim_ray_ribbon(struct transmitter *tn, const struct perfect_reflector **p, int num_ref, const double phi_start, const double phi_end, const double phi_incr, const double thet_start, const double thet_end, const double thet_incr) { struct ray_ribbon_array *rarr = custom_malloc(sizeof(struct 
ray_ribbon_array));
    populate_ray_ribbon_array_long(tn, p, rarr, num_ref,
                                   phi_start, phi_end, phi_incr,
                                   thet_start, thet_end, thet_incr, true);
    return rarr;
}

/* Returns the last node of a ribbon's chain (never NULL for a well-formed
 * ribbon; logs if rb itself is NULL — and would then crash below). */
struct ribbon_node *get_last_ribbon_node(const struct ray_ribbon *rb) {
    if (rb == 0) {
        fprintf(stderr, "Unexpected error! Should be non null!\n");
    }
    struct ribbon_node *rn = rb->head;
    while (rn->down != NULL) {
        rn = rn->down;
    }
    return rn;
}

/* (Re)computes the per-transmitter ray paths.  Paths are refreshed every
 * env->refresh_time ticks; otherwise cached results are kept. */
void populate_tx_paths(struct environment *env) {
    if (env->time_index % env->refresh_time == 0) {
        fprintf(stderr, "Refreshing rays at time index: %d\n",
                env->time_index);
        env->tx_paths_updated = false;
    }
    if (env->tx_paths_updated) return;
    clear_tx_paths(env);
    /* temporarily append the receiver patch as the last "reflector" */
    add_receiver_patch(env, 10);
    const struct perfect_reflector **prconst =
        (const struct perfect_reflector **) env->prarray;
    // now populate individual paths
    struct ray_ribbon_array *rb_arr;
    for (int ctr = 0; ctr < env->num_transmitters; ++ctr) {
        struct transmitter *tx = *(env->transmitters_array + ctr);
        rb_arr = init_ray_ribbon_array(30);
        //populate_ray_ribbon_array(tx, prconst, rb_arr, 600, 3, true);
        populate_ray_ribbon_array_long(tx, prconst, rb_arr, 3,
                                       -PI, PI, 0.01, 0, 2 * PI, 0.01, true);
        *(env->tx_paths + ctr) = rb_arr;
    }
    destroy_last_reflector(env);
    env->tx_paths_updated = true;
    /* NOTE(review): field name suggests a rx-paths dirty flag — verify
     * against the struct definition */
    env->tx_paths_updated_rx_paths_updated = false;
}

/* Recomputes delay, free-space gain, reflection count and doppler for one
 * receiver ribbon from its node-chain geometry. */
void update_receiver_ribbon_delay_dopplers(struct receiver_ray_ribbon *rb,
                                           const struct environment *env) {
    if (rb == 0) return;
    // compute delay
    double dist = 0;
    /* starts at -1 so the tx->first-surface segment does not count as a
     * reflection */
    rb->reflection_phase = -1;
    struct ribbon_node *rn = rb->ribbon->head;
    while (rn != NULL) {
        dist += length_ribbon_node(rn);
        rn = rn->down;
        rb->reflection_phase++;
    }
    rb->delay = dist/C;
    // free space path loss
    rb->gain = 1 / (4 * PI * dist / env->wavelength);
    // compute doppler
    rb->doppler = compute_doppler(rb->ribbon, env);
}

/* Doppler shift of a ribbon: mirrors the tx position/velocity through every
 * reflector in the chain, then projects the relative velocity of the
 * (virtual) source and the rx onto the line of sight. */
double compute_doppler(const struct ray_ribbon *rb,
                       const struct environment *env) {
    struct perfect_reflector *pr;
    struct ribbon_node *rn = rb->head->down;
    double src_pos[3];
    double src_vel[3];
    cblas_dcopy(3,
rb->start_tx->gn->smm->position, 1, src_pos, 1); cblas_dcopy(3, rb->start_tx->gn->smm->velocity, 1, src_vel, 1); while (rn != NULL) { pr = *(env->prarray + rn->surface_index); reflect(pr->center_point, pr->unit_normal, src_vel, src_pos); rn = rn->down; } double rel_pos[3]; double rel_vel[3]; diff(src_pos, rb->end_rx->gn->smm->position, rel_pos); diff(src_vel, rb->end_rx->gn->smm->velocity, rel_vel); double tmp = cblas_dnrm2(3, rel_pos, 1); return -cblas_ddot(3, rel_vel, 1, rel_pos, 1) / tmp / env->wavelength; } double length_ribbon_node(const struct ribbon_node *rn) { double diff_vector[3]; diff(rn->current->point, rn->current->end_pt, diff_vector); return cblas_dnrm2(3, diff_vector, 1); } void reflect(const double *pos1, const double *n1, double *vel, double *pos) { double diff_vec[3]; double r1[3]; double v1[3]; diff(pos1, pos, diff_vec); reflection_operation(diff_vec, n1, r1); reflection_operation(vel, n1, v1); // now update vel and pos cblas_dcopy(3, pos1, 1, pos, 1); cblas_daxpy(3, 1, r1, 1, pos, 1); cblas_dcopy(3, v1, 1, vel, 1); } void reflection_operation(const double *v1, const double *n1, double *vref) { cblas_dcopy(3, v1, 1, vref, 1); double tmp = cblas_ddot(3, n1, 1, vref, 1); cblas_daxpy(3, -2 * tmp, n1, 1, vref, 1); } void readout_all_signals_buffer(struct environment *env) { #pragma parallel for private(ctr) shared(env) env->time_index++; for (int ctr = 0; ctr < env->num_receivers; ++ctr) { struct receiver *rx = (*(env->receivers_array + ctr)); rx->rx_signal = 0; int ctr1 = 0; struct receiver_ray_ribbon_ll_node *rlln = rx->rlln; while (rlln != 0) { struct receiver_ray_ribbon *rrbn = rlln->rrbn; rrbn->integrated_doppler_phase = fmod( (rrbn->integrated_doppler_phase + rrbn->doppler * env->delta_time), 1); double phase = 0; assert(rrbn->signal); phase += rrbn->integrated_doppler_phase + rrbn->reflection_phase - (env->frequency + rrbn->doppler) * rrbn->signal->delay; if (rrbn->signal->delay + rrbn->signal->transmit_time < env->time) { double txpower = 
rrbn->start_tx->gn->tm->power_in_dBm/10;
                rx->rx_signal += rrbn->gain * cexp(2 * PI * phase * I) *
                    pow(10, txpower) * rrbn->signal->signal;
                rrbn->signal->receiver_read = true;
            }
            // calculate the direction of last ray
            struct ribbon_node *rn = rrbn->ribbon->head;
            while (!rn->hit_destination_patch) {
                rn = rn->down;
            }
            invert_spherical_angles(rn->current->unit_direction,
                                    &(rrbn->phi), &(rrbn->theta));
            rlln = rlln->next;
        }
        /* additive receiver noise, std = sqrt(noise power) */
        double rx_noise_std = pow(rx->recv_noise_power, 0.5);
        rx->rx_signal += rx_noise_std *
            (*(env->unit_power_gaussian_noise + ctr));
    }
}

/* Writes one TSV row per receiver (time, index, re, im of rx_signal);
 * emits a header row on the first call.  NULL fpout suppresses output. */
void printout_all_signals_buffer(const struct environment *env, FILE *fpout) {
    static bool first_call = true;
    if (first_call) {
        first_call = false;
        if (fpout != NULL) {
            fprintf(fpout, "time\treceiver\t"
                    "real\timag\n");
        }
    }
    for (int ctr = 0; ctr < env->num_receivers; ++ctr) {
        struct receiver *rx = (*(env->receivers_array + ctr));
        double real_sig = creal(rx->rx_signal);
        double imag_sig = cimag(rx->rx_signal);
        if (fpout != NULL) {
            fprintf(fpout, "%10.7g\t%10d"
                    "\t%10.7g\t%10.7g\n", env->time, ctr,
                    real_sig, imag_sig);
        }
    }
}

/* Per-receiver path dump: ray count followed by
 * (delay, doppler, phi, theta, gain) for every ray.
 * NOTE(review): unlike printout_all_signals_buffer, fpout is dereferenced
 * without a NULL check here. */
void printout_path_nariman(const struct environment *env, FILE *fpout) {
    static bool first_call = true;
    if (first_call) {
        first_call = false;
        fprintf(fpout, "env: rx <nodeid> <num_paths N> <delay1>"
                " <doppler1> <phi1> <theta1> <gain1> ... <"
                "delayN> <dopplerN> <phiN> <thetaN> <gainN>\n");
    }
    for (int ctr = 0; ctr < env->num_receivers; ++ctr) {
        struct receiver *rx = (*(env->receivers_array + ctr));
        struct receiver_ray_ribbon_ll_node *rlln = rx->rlln;
        struct receiver_ray_ribbon_ll_node *rllntmp = rx->rlln;
        int num_rays = 0;
        while (rllntmp != 0) {
            ++num_rays;
            rllntmp = rllntmp->next;
        }
        fprintf(fpout, "env: rx %10d %10d ", ctr, num_rays);
        while (rlln != 0) {
            struct receiver_ray_ribbon *rrbn = rlln->rrbn;
            fprintf(fpout, "%10.7g %10.7g %10.7g "
                    "%10.7g %10.7g ", rrbn->delay, rrbn->doppler,
                    rrbn->phi, rrbn->theta, rrbn->gain);
            rlln = rlln->next;
        }
        fprintf(fpout, "\n");
    }
}

/* Destroys every cached tx-path array and NULLs its slot. */
void clear_tx_paths(struct environment *env) {
    // clear existing paths
    int ctr = 0;
    while (*(env->tx_paths + ctr) != 0) {
        destroy_ray_ribbon_array(*(env->tx_paths + ctr));
        *(env->tx_paths + ctr) = 0;
        ++ctr;
    }
}

// Deprecated functions

// deprecated
/* Allocating variant of populate_ray_ribbon_array_full_copy: builds a fresh
 * node chain per angle (process_vertical_chain mallocs each bounce) and
 * hands ownership of successful ribbons to rarr. */
void populate_ray_ribbon_array_full_malloc(const struct transmitter *tx,
                        const struct perfect_reflector **ref_arr,
                        int num_ref, int num_points,
                        const double complex *angles,
                        struct ray_ribbon_array *rarr,
                        bool single_type) {
    int ctr = 0;
    double phi, theta;
    for (ctr = 0; ctr < num_points; ++ctr) {
        phi = creal(*(angles + ctr));
        theta = cimag(*(angles + ctr));
        struct ribbon_node *rn = init_ribbon_node();
        cblas_dcopy(3, tx->gn->smm->position, 1, rn->current->point, 1);
        double direction[3] = {cos(phi), sin(phi) * cos(theta),
                               sin(phi) * sin(theta)};
        cblas_dcopy(3, direction, 1, rn->current->unit_direction, 1);
        bool hit_des = process_vertical_chain(rn, ref_arr, num_ref);
        if (hit_des) {
            struct ray_ribbon *rb = init_ray_ribbon(rn);
            rb->start_tx = tx;
            bool ribbon_added = add_ray_ribbon(rarr, rb, single_type);
            /* rejected duplicates (single_type) must be freed here */
            if (!ribbon_added) destroy_ray_ribbon(rb);
        } else {
            destroy_chain_of_ribbon_nodes(rn);
        }
    }
}

// deprecated
/* Allocating twin of process_vertical_chain_nomalloc: each bounce mallocs
 * the next node and links it via rn->down. */
bool process_vertical_chain(struct ribbon_node *rn,
                            const struct perfect_reflector **pr,
                            int num_reflections) {
    // this function computes whether a ray can hit the
    // destination after a max num_reflections
    int ctr=0, ctrindex=-1, num_reflectors=0;
    double tmin = INFINITY, sgn = -1;
    const struct perfect_reflector *prsurf = *(pr + ctr);
    while(prsurf != NULL) {
        double complex dbl = compute_intersection(rn->current, prsurf);
        rn->current->length = creal(dbl);
        if (creal(dbl) < tmin && ctr != rn->surface_index) {
            tmin = creal(dbl);
            sgn = cimag(dbl);
            ctrindex = ctr;
        }
        ++ctr;
        prsurf = *(pr + ctr);
    }
    num_reflectors = ctr;
    if (sgn>0 || tmin>1e5) return false;
    if (ctrindex == num_reflectors - 1) {
        rn->hit_destination_patch = true;
        return true;
    }
    if (rn->num_reflections > num_reflections) return false;
    // only case remaining is if there is intersection with
    // reflector and number of reflections is small
    // update starting point
    /* NOTE(review): plain malloc — the `ctr` field (and anything not
     * assigned below) is left uninitialized, unlike init_ribbon_node */
    struct ribbon_node *rn_next = custom_malloc(sizeof(struct ribbon_node));
    rn_next->down = 0;
    rn_next->current = custom_calloc(1, sizeof(struct half_infinite_ray));
    rn_next->hit_destination_patch = false;
    rn_next->num_reflections = rn->num_reflections + 1;
    cblas_dcopy(3, rn->current->point, 1, rn_next->current->point, 1);
    cblas_daxpy(3, tmin, rn->current->unit_direction, 1,
                rn_next->current->point, 1);
    rn_next->surface_index = ctrindex;
    // update ending point of previous ray
    cblas_dcopy(3, rn_next->current->point, 1, rn->current->end_pt, 1);
    // next update direction
    cblas_dcopy(3, rn->current->unit_direction, 1,
                rn_next->current->unit_direction, 1);
    const struct perfect_reflector *prsurface = pr[ctrindex];
    double factor = -2*cblas_ddot(3, rn->current->unit_direction, 1,
                                  prsurface->unit_normal, 1);
    cblas_daxpy(3, factor, prsurface->unit_normal, 1,
                rn_next->current->unit_direction, 1);
    // update pointers
    rn->down = rn_next;
    return process_vertical_chain(rn_next, pr, num_reflections);
}

// deprecated
/* Iterative refinement of a ribbon towards the rx position: perturbs the
 * launch angles, solves for averaging weights, and re-traces until the
 * endpoint converges (max 100 iterations).  Returns NULL on failure. */
struct ray_ribbon *refine_ray_image_ribbon(const struct transmitter *tx,
                                           const struct ray_ribbon *rb,
                                           const struct receiver *rx,
                                           const struct perfect_reflector **pr) {
    struct ray_ribbon_array *node_array_mod = generate_nearby_ribbons(
        tx, pr, 3, rb);
    // return if insufficient number of ribbons
    int len_array = 0;
    while (*(node_array_mod->ribbons + len_array) != 0) {
        len_array++;
    }
    if (len_array < 3) {
        destroy_ray_ribbon_array(node_array_mod);
        fprintf(stderr, "Insufficient ribbons generated\n");
        return 0;
    }
    const double *point = rx->gn->smm->position;
    if (_RAYTRACING_DEBUG) {
        fprintf(stderr, "Position\n");
        print_vector(point);
    }
    int ctr_typ_so_far = 0, ctr = 0;
    double weights[3];
    struct ray_ribbon *rbn = *(node_array_mod->ribbons);
    struct ribbon_node *rn = 0;
    while ((!is_close_ribbon(rbn, point)) && ctr < 100) {
        compute_averaging_coefficients(point, node_array_mod, weights);
        rn = init_ribbon_node();
        compute_average_ribbon_node(rn, node_array_mod, weights);
        /* ribbon 0's chain is replaced by the averaged re-trace */
        destroy_ray_ribbon_nodes(rbn);
        bool has_hit = process_vertical_chain(rn, pr, 3);
        if (!has_hit) {
            fprintf(stderr, "Unexpected error. Destroying rn.\n");
            destroy_chain_of_ribbon_nodes(rn);
            ctr = 101;  /* marks failure for the check below */
            break;
        }
        (*(node_array_mod->ribbons))->head = rn;
        rbn = *(node_array_mod->ribbons);
        ++ctr;
    }
    destroy_ray_ribbon_array_all_but_first(node_array_mod);
    if (ctr < 100) {
        rbn->start_tx = tx;
        rbn->end_rx = rx;
        return rbn;
    } else {
        fprintf(stderr, "Ray did not converge!\n");
        destroy_ray_ribbon(rbn);
        return 0;
    }
    // return 0 if ray ribbon does not converge
}

// deprecated
/* True when the ribbon's endpoint is within 1e-6 of point. */
bool is_close_ribbon(const struct ray_ribbon *rb, const double *point) {
    return isclose(rb->head, point);
}

// deprecated
/* True when the chain's last end_pt is within 1e-6 of point. */
bool isclose(const struct ribbon_node *rn, const double *point) {
    double diff[3];
    while (rn->down != NULL) {
        rn = rn->down;
    }
    cblas_dcopy(3, rn->current->end_pt, 1, diff, 1);
    cblas_daxpy(3, -1, point, 1, diff, 1);
    if (cblas_dnrm2(3, diff, 1) < 1e-6) return true;
    return false;
}

// deprecated
/* Builds a launch node whose direction is the weights-weighted average of
 * the three ribbons' launch angles; start point copied from the last
 * ribbon examined. */
void compute_average_ribbon_node(struct ribbon_node *rn,
                                 const struct ray_ribbon_array *rba,
                                 double *weights) {
    double thet, phi;
    double thetaav = 0, phiav = 0;
    int ctr = 0;
    struct ribbon_node *node;
    for (; ctr < 3; ++ctr) {
        node = (*(rba->ribbons + ctr))->head;
        invert_spherical_angles(node->current->unit_direction, &phi, &thet);
        thetaav += *(weights+ctr) *thet;
        phiav += *(weights+ctr) *phi;
    }
    rn->current->unit_direction[0] = cos(phiav);
    rn->current->unit_direction[1] = sin(phiav)*cos(thetaav);
    rn->current->unit_direction[2] = sin(phiav)*sin(thetaav);
    rn->hit_destination_patch = 0;
    rn->num_reflections = 0;
    rn->ctr = 1;
    cblas_dcopy(3, node->current->point, 1, rn->current->point, 1);
}

// deprecated
/* Least-squares solve (GSL QR) for barycentric-style weights expressing
 * `point` relative to the endpoints of ribbons 1 and 2 about ribbon 0;
 * weights[0] = 1 - x0 - x1. */
void compute_averaging_coefficients(const double *point,
                                    const struct ray_ribbon_array *rba,
                                    double *weights) {
    gsl_matrix *mat = gsl_matrix_alloc(3, 2);
    gsl_permutation *perm = gsl_permutation_alloc(3);
    gsl_vector *x = gsl_vector_alloc(2);
    gsl_vector *b = gsl_vector_alloc(3);
    gsl_vector *tau = gsl_vector_alloc(2);
    gsl_vector *residual = gsl_vector_alloc(3);
    int c0, c1;
    for (c0=0; c0<3; c0++) {
        /* fprintf(stderr, ANSI_COLOR_GREEN); */
        /* print_ray_ribbon_array(rba); */
        /* fprintf(stderr, ANSI_COLOR_RESET); */
        const struct ribbon_node *node =
            get_last_ribbon_node(*(rba->ribbons));
        gsl_vector_set(b, c0, *(point + c0) - node->current->end_pt[c0]);
        for (c1=0; c1<2; c1++) {
            const struct ribbon_node *nodeset =
                get_last_ribbon_node(*(rba->ribbons + c1 + 1));
            gsl_matrix_set(mat, c0, c1,
                           nodeset->current->end_pt[c0] -
                           node->current->end_pt[c0]);
        }
    }
    gsl_linalg_QR_decomp(mat, tau);
    gsl_linalg_QR_lssolve(mat, tau, b, x, residual);
    *(weights) = 1 - gsl_vector_get(x, 0) - gsl_vector_get(x, 1);
    *(weights + 1) = gsl_vector_get(x, 0);
    *(weights + 2) = gsl_vector_get(x, 1);
    gsl_matrix_free(mat);
    gsl_permutation_free(perm);
    gsl_vector_free(x);
    gsl_vector_free(b);
    gsl_vector_free(tau);
    gsl_vector_free(residual);
}

// deprecated
/* Splices out zero-length (< 1e-4) segments, freeing the removed nodes. */
void remove_ribbon_node_duplicates(struct ribbon_node *rn) {
    struct ribbon_node *rn_orig = rn;
    struct ribbon_node *rn_tmp;
    while (rn != NULL) {
        if ((rn->down != NULL) &&
            (length_ribbon_node(rn->down) < 1e-4)) {
            rn_tmp = rn->down;
            rn->down = rn->down->down;
            destroy_ribbon_node(rn_tmp);
        }
        rn = rn->down;
    }
    // update
num_reflections rn = rn_orig; int ctr = 0; while (rn != NULL) { rn->num_reflections = ctr; ++ctr; rn = rn->down; } } // deprecated void update_ribbon_delay_dopplers(struct ray_ribbon *rb, const struct environment *env) { if (rb == 0) return; // compute delay double dist = 0; rb->reflection_phase = -1; struct ribbon_node *rn = rb->head; while (rn != NULL) { dist += length_ribbon_node(rn); rn = rn->down; rb->reflection_phase++; } rb->delay = dist/C; // free space path loss rb->gain = 1 / (4 * PI * dist / env->wavelength); // compute doppler rb->doppler = compute_doppler(rb, env); } // deprecated void populate_env_paths(struct environment *env) { add_receiver_patch(env, 20); // first sound the channel const struct perfect_reflector **prconst = (const struct perfect_reflector **) env->prarray; // now generate rayribbons for each receiver // clear existing ray ribbon arrays clear_env_paths(env); // ctr loops over receivers int ctr = 0; struct receiver *rx = *(env->receivers_array); while (ctr < env->num_receivers) { // ctrtx loops over transmitters int ctrtx = 0; struct ray_ribbon_array *rb_arr = init_ray_ribbon_array(30); // if too low, can cause bugs struct ray_ribbon_array *rba = *(env->tx_paths); struct transmitter *tx = *(env->transmitters_array); while (ctrtx < env->num_transmitters) { // ctr2 loops over rays in tx path ray ribbon from a // part. tx int ctr2 = 0; struct ray_ribbon *rb; rb = *(rba->ribbons + ctr2); while(rb != 0) { struct ray_ribbon *tmprb = refine_ray_ribbon_image(tx, rb, rx, prconst); bool stat; if (tmprb == 0) { fprintf(stderr, "Null ribbon for ray %d" " of tx %d at rx %d\n", ctr2, ctrtx, ctr); } else { stat = add_ray_ribbon(rb_arr, tmprb, true); } if (!stat && tmprb != 0) { fprintf(stderr, "Unexpected error! 
" "stat should always be true!\n");
                }
                ctr2++;
                rb = *(rba->ribbons + ctr2);
            }
            ctrtx++;
            rba = *(env->tx_paths + ctrtx);
            tx = *(env->transmitters_array + ctrtx);
        }
        *(env->env_paths + ctr) = rb_arr;
        ++ctr;
        /* keep env_paths NULL-terminated as it grows */
        *(env->env_paths + ctr) = 0;
        rx = *(env->receivers_array + ctr);
    }
    destroy_last_reflector(env);
}

// deprecated
/* Produces 3 ribbons launched at tiny fixed perturbations of rb's launch
 * angles (used to set up the averaging system in refine_ray_image_ribbon). */
struct ray_ribbon_array *generate_nearby_ribbons(const struct transmitter *tx,
                        const struct perfect_reflector **ref_arr,
                        int num_ref,
                        const struct ray_ribbon *rb) {
    double phi, theta;
    invert_spherical_angles(rb->head->current->unit_direction, &phi, &theta);
    struct ray_ribbon_array *rarr = init_ray_ribbon_array(4);
    double complex *angles = custom_malloc(3 * sizeof(double complex));
    *angles = (phi - 0.00001) + I * (theta + 0.00002);
    *(angles + 1) = (phi + 0.000039) + I * (theta + 0.000029);
    *(angles + 2) = (phi - 0.000041) + I * (theta - 0.00007);
    /* double fact = (1e-3)/RAND_MAX; */
    /* *angles = (phi - fact * rand()) + I * (theta + fact * rand()); */
    /* *(angles + 1) = phi + fact * rand() + I * (theta + fact * rand()); */
    /* *(angles + 2) = phi + fact * rand() + I * (theta + fact * rand()); */
    populate_ray_ribbon_array_full_copy(tx, ref_arr, num_ref, 3,
                                        angles, rarr, false);
    custom_free(angles);
    return rarr;
}

// deprecated
/* Refreshes delay/doppler/gain for every ribbon of every env path. */
void update_env_paths_delay_dopplers(struct environment *env) {
    int ctr = 0;
    struct ray_ribbon_array *rba = *(env->env_paths + ctr);
    while (rba != 0) {
        int ctr1 = 0;
        struct ray_ribbon *rb = *(rba->ribbons + ctr1);
        while (rb != NULL) {
            update_ribbon_delay_dopplers(rb, env);
            ctr1++;
            rb = *(rba->ribbons + ctr1);
        }
        ++ctr;
        rba = *(env->env_paths + ctr);
    }
}

// deprecated
/* Destroys every cached env-path array and NULLs its slot. */
void clear_env_paths(struct environment *env) {
    // clear existing paths
    int ctr = 0;
    while (*(env->env_paths + ctr) != 0) {
        destroy_ray_ribbon_array(*(env->env_paths + ctr));
        *(env->env_paths + ctr) = 0;
        ++ctr;
    }
}

// deprecated
/* Non-buffered readout: sums each env-path ribbon's contribution (doppler
 * phase, reflection phase, delay, gain, tx power, instantaneous baseband
 * signal) plus noise, and writes a TSV row per receiver to fpout. */
void readout_all_signals(struct environment *env, FILE *fpout) {
    static bool first_call = true;
    if (first_call) {
        first_call = false;
        if (fpout != NULL) {
            fprintf(fpout, "time\treceiver\t"
                    "real\timag\n");
        }
    }
    double complex signal;
    struct ray_ribbon_array *rba;
    int ctr = 0;
    rba = *(env->env_paths + ctr);
    struct receiver *rx = (*(env->receivers_array + ctr));
    while (rba != 0) {
        signal = 0;
        int ctr1 = 0;
        struct ray_ribbon *rb = *(rba->ribbons + ctr1);
        while (rb != 0) {
            rb->integrated_doppler_phase = fmod(
                (rb->integrated_doppler_phase +
                 rb->doppler * env->delta_time), 1);
            double phase = 0;
            phase += rb->integrated_doppler_phase + rb->reflection_phase -
                (env->frequency + rb->doppler) * rb->delay;
            signal += rb->gain * cexp(2 * PI * phase * I) *
                pow(10, rb->start_tx->gn->tm->power_in_dBm/10) *
                rb->start_tx->baseband_signal;
            ctr1++;
            rb = *(rba->ribbons + ctr1);
        }
        double rx_noise_std = pow(rx->recv_noise_power, 0.5);
        signal += rx_noise_std * (*(env->unit_power_gaussian_noise + ctr));
        double real_sig = creal(signal);
        double imag_sig = cimag(signal);
        if (fpout != NULL) {
            fprintf(fpout, "%lf\t%d\t"
                    "%e\t%e\n", env->time, ctr, real_sig, imag_sig);
        }
        ++ctr;
        rba = *(env->env_paths + ctr);
        rx = (*(env->receivers_array + ctr));
    }
}
simde-diagnostic.h
/* SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, * modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Copyright: * 2017-2020 Evan Nemerson <evan@nemerson.com> */ /* SIMDe targets a very wide range of standards and compilers, and our * goal is to compile cleanly even with extremely aggressive warnings * (i.e., -Weverything in clang, -Wextra in GCC, /W4 for MSVC, etc.) * treated as errors. * * While our preference is to resolve the underlying issue a given * diagnostic is warning us about, sometimes that's not possible. * Fixing a warning in one compiler may cause problems in another. * Sometimes a warning doesn't really apply to us (false positives), * and sometimes adhering to a warning would mean dropping a feature * we *know* the compiler supports since we have tested specifically * for the compiler or feature. * * When practical, warnings are only disabled for specific code. 
For * a list of warnings which are enabled by default in all SIMDe code, * see SIMDE_DISABLE_UNWANTED_DIAGNOSTICS. Note that we restore the * warning stack when SIMDe is done parsing, so code which includes * SIMDe is not deprived of these warnings. */ #if !defined(SIMDE_DIAGNOSTIC_H) #define SIMDE_DIAGNOSTIC_H #include "hedley.h" #include "simde-detect-clang.h" #include "simde-arch.h" /* This is only to help us implement functions like _mm_undefined_ps. */ #if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_) #undef SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ #endif #if HEDLEY_HAS_WARNING("-Wuninitialized") #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("clang diagnostic ignored \"-Wuninitialized\"") #elif HEDLEY_GCC_VERSION_CHECK(4,2,0) #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("GCC diagnostic ignored \"-Wuninitialized\"") #elif HEDLEY_PGI_VERSION_CHECK(19,10,0) #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("diag_suppress 549") #elif HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus) #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("error_messages(off,SEC_UNINITIALIZED_MEM_READ,SEC_UNDEFINED_RETURN_VALUE,unassigned)") #elif HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("error_messages(off,SEC_UNINITIALIZED_MEM_READ,SEC_UNDEFINED_RETURN_VALUE)") #elif HEDLEY_SUNPRO_VERSION_CHECK(5,12,0) && defined(__cplusplus) #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("error_messages(off,unassigned)") #elif \ HEDLEY_TI_VERSION_CHECK(16,9,9) || \ HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \ HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,2) #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("diag_suppress 551") #elif HEDLEY_INTEL_VERSION_CHECK(13,0,0) #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("warning(disable:592)") #elif HEDLEY_MSVC_VERSION_CHECK(19,0,0) && !defined(__MSVC_RUNTIME_CHECKS) #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ 
__pragma(warning(disable:4700))
#endif

/* GCC emits a lot of "notes" about the ABI being different for things
 * in newer versions of GCC.  We don't really care because all our
 * functions are inlined and don't generate ABI. */
#if HEDLEY_GCC_VERSION_CHECK(7,0,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_PSABI_ _Pragma("GCC diagnostic ignored \"-Wpsabi\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_PSABI_
#endif

/* Since MMX uses x87 FP registers, you're supposed to call _mm_empty()
 * after each MMX function before any floating point instructions.
 * Some compilers warn about functions which use MMX functions but
 * don't call _mm_empty().  However, since SIMDe is implementing the
 * MMX API we shouldn't be calling _mm_empty(); we leave it to the
 * caller to invoke simde_mm_empty(). */
#if HEDLEY_INTEL_VERSION_CHECK(19,0,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ _Pragma("warning(disable:13200 13203)")
#elif defined(HEDLEY_MSVC_VERSION)
  #define SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ __pragma(warning(disable:4799))
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_
#endif

/* Intel is pushing people to use OpenMP SIMD instead of Cilk+, so they
 * emit a diagnostic if you use #pragma simd instead of
 * #pragma omp simd.  SIMDe supports OpenMP SIMD, you just need to
 * compile with -qopenmp or -qopenmp-simd and define
 * SIMDE_ENABLE_OPENMP.  Cilk+ is just a fallback. */
#if HEDLEY_INTEL_VERSION_CHECK(18,0,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_ _Pragma("warning(disable:3948)")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_
#endif

/* MSVC emits a diagnostic when we call a function (like
 * simde_mm_set_epi32) while initializing a struct.  We currently do
 * this a *lot* in the tests.
 */
#if \
  defined(HEDLEY_MSVC_VERSION)
  #define SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_ __pragma(warning(disable:4204))
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_
#endif

/* This warning needs a lot of work.  It is triggered if all you do is
 * pass the value to memcpy/__builtin_memcpy, or if you initialize a
 * member of the union, even if that member takes up the entire union.
 * Last tested with clang-10, hopefully things will improve in the
 * future; if clang fixes this I'd love to enable it. */
#if \
  HEDLEY_HAS_WARNING("-Wconditional-uninitialized")
  #define SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_ _Pragma("clang diagnostic ignored \"-Wconditional-uninitialized\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_
#endif

/* This warning is meant to catch things like `0.3 + 0.4 == 0.7`, which
 * is false.  However, SIMDe uses these operations exclusively
 * for things like _mm_cmpeq_ps, for which we really do want to check
 * for equality (or inequality).
 *
 * If someone wants to put together a SIMDE_FLOAT_EQUAL(a, op, b) macro
 * which just wraps a check in some code to disable this diagnostic I'd
 * be happy to accept it. */
#if \
  HEDLEY_HAS_WARNING("-Wfloat-equal") || \
  HEDLEY_GCC_VERSION_CHECK(3,0,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_ _Pragma("GCC diagnostic ignored \"-Wfloat-equal\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_
#endif

/* This is because we use HEDLEY_STATIC_ASSERT for static assertions.
 * If Hedley can't find an implementation it will preprocess to
 * nothing, which means there will be a trailing semi-colon.
*/ #if HEDLEY_HAS_WARNING("-Wextra-semi") #define SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ _Pragma("clang diagnostic ignored \"-Wextra-semi\"") #elif HEDLEY_GCC_VERSION_CHECK(8,1,0) && defined(__cplusplus) #define SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ _Pragma("GCC diagnostic ignored \"-Wextra-semi\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ #endif /* We do use a few variadic macros, which technically aren't available * until C99 and C++11, but every compiler I'm aware of has supported * them for much longer. That said, usage is isolated to the test * suite and compilers known to support them. */ #if HEDLEY_HAS_WARNING("-Wvariadic-macros") || HEDLEY_GCC_VERSION_CHECK(4,0,0) #if HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic") #define SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_ \ _Pragma("clang diagnostic ignored \"-Wvariadic-macros\"") \ _Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_ _Pragma("GCC diagnostic ignored \"-Wvariadic-macros\"") #endif #else #define SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_ #endif /* emscripten requires us to use a __wasm_unimplemented_simd128__ macro * before we can access certain SIMD intrinsics, but this diagnostic * warns about it being a reserved name. It is a reserved name, but * it's reserved for the compiler and we are using it to convey * information to the compiler. * * This is also used when enabling native aliases since we don't get to * choose the macro names. */ #if HEDLEY_HAS_WARNING("-Wreserved-id-macro") #define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_ _Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_ #endif /* Similar to above; types like simde__m128i are reserved due to the * double underscore, but we didn't choose them, Intel did. 
*/ #if HEDLEY_HAS_WARNING("-Wreserved-identifier") #define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_ _Pragma("clang diagnostic ignored \"-Wreserved-identifier\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_ #endif /* clang 3.8 warns about the packed attribute being unnecessary when * used in the _mm_loadu_* functions. That *may* be true for version * 3.8, but for later versions it is crucial in order to make unaligned * access safe. */ #if HEDLEY_HAS_WARNING("-Wpacked") #define SIMDE_DIAGNOSTIC_DISABLE_PACKED_ _Pragma("clang diagnostic ignored \"-Wpacked\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_PACKED_ #endif /* Triggered when assigning a float to a double implicitly. We use * explicit casts in SIMDe, this is only used in the test suite. */ #if HEDLEY_HAS_WARNING("-Wdouble-promotion") #define SIMDE_DIAGNOSTIC_DISABLE_DOUBLE_PROMOTION_ _Pragma("clang diagnostic ignored \"-Wdouble-promotion\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_DOUBLE_PROMOTION_ #endif /* Several compilers treat conformant array parameters as VLAs. We * test to make sure we're in C mode (C++ doesn't support CAPs), and * that the version of the standard supports CAPs. We also reject * some buggy compilers like MSVC (the logic is in Hedley if you want * to take a look), but with certain warnings enabled some compilers * still like to emit a diagnostic. */ #if HEDLEY_HAS_WARNING("-Wvla") #define SIMDE_DIAGNOSTIC_DISABLE_VLA_ _Pragma("clang diagnostic ignored \"-Wvla\"") #elif HEDLEY_GCC_VERSION_CHECK(4,3,0) #define SIMDE_DIAGNOSTIC_DISABLE_VLA_ _Pragma("GCC diagnostic ignored \"-Wvla\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_VLA_ #endif /* If you add an unused attribute to a function and don't use it, clang * may emit this. 
*/
#if HEDLEY_HAS_WARNING("-Wused-but-marked-unused")
  #define SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_ _Pragma("clang diagnostic ignored \"-Wused-but-marked-unused\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_
#endif

#if HEDLEY_HAS_WARNING("-Wpass-failed")
  #define SIMDE_DIAGNOSTIC_DISABLE_PASS_FAILED_ _Pragma("clang diagnostic ignored \"-Wpass-failed\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_PASS_FAILED_
#endif

#if HEDLEY_HAS_WARNING("-Wpadded")
  #define SIMDE_DIAGNOSTIC_DISABLE_PADDED_ _Pragma("clang diagnostic ignored \"-Wpadded\"")
#elif HEDLEY_MSVC_VERSION_CHECK(19,0,0) /* Likely goes back further */
  #define SIMDE_DIAGNOSTIC_DISABLE_PADDED_ __pragma(warning(disable:4324))
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_PADDED_
#endif

#if HEDLEY_HAS_WARNING("-Wzero-as-null-pointer-constant")
  #define SIMDE_DIAGNOSTIC_DISABLE_ZERO_AS_NULL_POINTER_CONSTANT_ _Pragma("clang diagnostic ignored \"-Wzero-as-null-pointer-constant\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_ZERO_AS_NULL_POINTER_CONSTANT_
#endif

#if HEDLEY_HAS_WARNING("-Wold-style-cast")
  #define SIMDE_DIAGNOSTIC_DISABLE_OLD_STYLE_CAST_ _Pragma("clang diagnostic ignored \"-Wold-style-cast\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_OLD_STYLE_CAST_
#endif

#if HEDLEY_HAS_WARNING("-Wcast-function-type") || HEDLEY_GCC_VERSION_CHECK(8,0,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_CAST_FUNCTION_TYPE_ _Pragma("GCC diagnostic ignored \"-Wcast-function-type\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_CAST_FUNCTION_TYPE_
#endif

/* clang will emit this warning when we use C99 extensions when not in
 * C99 mode, even though it does support this.  In such cases we check
 * the compiler and version first, so we know it's not a problem.
 */
#if HEDLEY_HAS_WARNING("-Wc99-extensions")
  #define SIMDE_DIAGNOSTIC_DISABLE_C99_EXTENSIONS_ _Pragma("clang diagnostic ignored \"-Wc99-extensions\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_C99_EXTENSIONS_
#endif

/* https://github.com/simd-everywhere/simde/issues/277 */
#if defined(HEDLEY_GCC_VERSION) && HEDLEY_GCC_VERSION_CHECK(4,6,0) && !HEDLEY_GCC_VERSION_CHECK(6,4,0) && defined(__cplusplus)
  #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_ _Pragma("GCC diagnostic ignored \"-Wunused-but-set-variable\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_
#endif

/* This is the warning that you normally define _CRT_SECURE_NO_WARNINGS
 * to silence, but you have to do that before including anything and
 * that would require reordering includes. */
#if defined(_MSC_VER)
  #define SIMDE_DIAGNOSTIC_DISABLE_ANNEX_K_ __pragma(warning(disable:4996))
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_ANNEX_K_
#endif

/* Some compilers, such as clang, may use `long long` for 64-bit
 * integers, but `long long` triggers a diagnostic with
 * -Wc++98-compat-pedantic which says 'long long' is incompatible with
 * C++98. */
#if HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic")
  #if HEDLEY_HAS_WARNING("-Wc++11-long-long")
    #define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ \
      _Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"") \
      _Pragma("clang diagnostic ignored \"-Wc++11-long-long\"")
  #else
    #define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ _Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"")
  #endif
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_
#endif

/* Same problem as above */
#if HEDLEY_HAS_WARNING("-Wc++11-long-long")
  #define SIMDE_DIAGNOSTIC_DISABLE_CPP11_LONG_LONG_ _Pragma("clang diagnostic ignored \"-Wc++11-long-long\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_CPP11_LONG_LONG_
#endif

/* emscripten emits this whenever stdin/stdout/stderr is used in a
 * macro.
*/ #if HEDLEY_HAS_WARNING("-Wdisabled-macro-expansion") #define SIMDE_DIAGNOSTIC_DISABLE_DISABLED_MACRO_EXPANSION_ _Pragma("clang diagnostic ignored \"-Wdisabled-macro-expansion\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_DISABLED_MACRO_EXPANSION_ #endif /* Clang uses C11 generic selections to implement some AltiVec * functions, which triggers this diagnostic when not compiling * in C11 mode */ #if HEDLEY_HAS_WARNING("-Wc11-extensions") #define SIMDE_DIAGNOSTIC_DISABLE_C11_EXTENSIONS_ _Pragma("clang diagnostic ignored \"-Wc11-extensions\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_C11_EXTENSIONS_ #endif /* Clang sometimes triggers this warning in macros in the AltiVec and * NEON headers, or due to missing functions. */ #if HEDLEY_HAS_WARNING("-Wvector-conversion") #define SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ _Pragma("clang diagnostic ignored \"-Wvector-conversion\"") /* For NEON, the situation with -Wvector-conversion in clang < 10 is * bad enough that we just disable the warning altogether. On x86, * clang has similar issues on several sse4.2+ intrinsics before 3.8. */ #if \ (defined(SIMDE_ARCH_ARM) && SIMDE_DETECT_CLANG_VERSION_NOT(10,0,0)) || \ SIMDE_DETECT_CLANG_VERSION_NOT(3,8,0) #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_ SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ #endif #else #define SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ #endif #if !defined(SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_) #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_ #endif /* Prior to 5.0, clang didn't support disabling diagnostics in * statement exprs. As a result, some macros we use don't * properly silence warnings. 
*/ #if SIMDE_DETECT_CLANG_VERSION_NOT(5,0,0) && HEDLEY_HAS_WARNING("-Wcast-qual") && HEDLEY_HAS_WARNING("-Wcast-align") #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ _Pragma("clang diagnostic ignored \"-Wcast-qual\"") _Pragma("clang diagnostic ignored \"-Wcast-align\"") #elif SIMDE_DETECT_CLANG_VERSION_NOT(5,0,0) && HEDLEY_HAS_WARNING("-Wcast-qual") #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ _Pragma("clang diagnostic ignored \"-Wcast-qual\"") #elif SIMDE_DETECT_CLANG_VERSION_NOT(5,0,0) && HEDLEY_HAS_WARNING("-Wcast-align") #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ _Pragma("clang diagnostic ignored \"-Wcast-align\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ #endif /* SLEEF triggers this a *lot* in their headers */ #if HEDLEY_HAS_WARNING("-Wignored-qualifiers") #define SIMDE_DIAGNOSTIC_DISABLE_IGNORED_QUALIFIERS_ _Pragma("clang diagnostic ignored \"-Wignored-qualifiers\"") #elif HEDLEY_GCC_VERSION_CHECK(4,3,0) #define SIMDE_DIAGNOSTIC_DISABLE_IGNORED_QUALIFIERS_ _Pragma("GCC diagnostic ignored \"-Wignored-qualifiers\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_IGNORED_QUALIFIERS_ #endif /* GCC emits this under some circumstances when using __int128 */ #if HEDLEY_GCC_VERSION_CHECK(4,8,0) #define SIMDE_DIAGNOSTIC_DISABLE_PEDANTIC_ _Pragma("GCC diagnostic ignored \"-Wpedantic\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_PEDANTIC_ #endif /* MSVC doesn't like (__assume(0), code) and will warn about code being * unreachable, but we want it there because not all compilers * understand the unreachable macro and will complain if it is missing. * I'm planning on adding a new macro to Hedley to handle this a bit * more elegantly, but until then... */ #if defined(HEDLEY_MSVC_VERSION) #define SIMDE_DIAGNOSTIC_DISABLE_UNREACHABLE_ __pragma(warning(disable:4702)) #else #define SIMDE_DIAGNOSTIC_DISABLE_UNREACHABLE_ #endif /* This is a false positive from GCC in a few places. 
*/ #if HEDLEY_GCC_VERSION_CHECK(4,7,0) #define SIMDE_DIAGNOSTIC_DISABLE_MAYBE_UNINITIAZILED_ _Pragma("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_MAYBE_UNINITIAZILED_ #endif #if defined(SIMDE_ENABLE_NATIVE_ALIASES) #define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS_NATIVE_ALIASES_ \ SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_ #else #define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS_NATIVE_ALIASES_ #endif /* Some native functions on E2K with instruction set < v6 are declared * as deprecated due to inefficiency. Still they are more efficient * than SIMDe implementation. So we're using them, and switching off * these deprecation warnings. */ #if defined(HEDLEY_MCST_LCC_VERSION) # define SIMDE_LCC_DISABLE_DEPRECATED_WARNINGS _Pragma("diag_suppress 1215,1444") # define SIMDE_LCC_REVERT_DEPRECATED_WARNINGS _Pragma("diag_default 1215,1444") #else # define SIMDE_LCC_DISABLE_DEPRECATED_WARNINGS # define SIMDE_LCC_REVERT_DEPRECATED_WARNINGS #endif #define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS \ HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION \ SIMDE_DISABLE_UNWANTED_DIAGNOSTICS_NATIVE_ALIASES_ \ SIMDE_DIAGNOSTIC_DISABLE_PSABI_ \ SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ \ SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_ \ SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_ \ SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_ \ SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_ \ SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ \ SIMDE_DIAGNOSTIC_DISABLE_VLA_ \ SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_ \ SIMDE_DIAGNOSTIC_DISABLE_PASS_FAILED_ \ SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ \ SIMDE_DIAGNOSTIC_DISABLE_CPP11_LONG_LONG_ \ SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_ \ SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ \ SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_ \ SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_ #endif /* !defined(SIMDE_DIAGNOSTIC_H) */
effect.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT % % E F F E C T % % EEE FFF FFF EEE C T % % E F F E C T % % EEEEE F F EEEEE CCCC T % % % % % % MagickCore Image Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/threshold.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveBlurImage() 
adaptively blurs the image by blurring less
%  intensely near image edges and more intensely far from edges. We blur the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
%  The format of the AdaptiveBlurImage method is:
%
%      Image *AdaptiveBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *blur_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *blur_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  /* A near-zero sigma means no blurring: return the clone unchanged. */
  if (fabs(sigma) < MagickEpsilon)
    return(blur_image);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, blur, and level again.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  /* Blur the edge map so the per-pixel kernel selection varies smoothly;
     on failure (NULL) the un-blurred edge map is kept. */
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel));
  /* Only even indices i are populated: kernel[i] is a Gaussian kernel of
     width (width-i), normalized to unit weight via the center tap. */
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory(
      (size_t) (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /* Fold the normalization residue into the center tap so the kernel
       sums to exactly 1.0. */
    kernel[i][(k-1)/2]+=(double) (1.0-normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  /* Partial allocation: release what was acquired and bail out. */
  if (i < (ssize_t) width)
    {
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,blur_image,blur_image->rows,1)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    register const Quantum
      *magick_restrict r;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      register const Quantum
        *magick_restrict p;

      register ssize_t
        i;

      ssize_t
        center,
        j;

      /* Pick the kernel index from the edge-map intensity: strong edges
         (high intensity) give large j, i.e. a smaller (width-j) kernel,
         i.e. less blurring. j is clamped to [0, width] and forced even to
         match the populated kernel slots. */
      j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5);
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      /* Offset (in Quantum units) of the window's center pixel. */
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const double
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        /* Copy-only channels (or masked-out pixels) pass through. */
        if (((blur_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p+center) == 0))
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(blur_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveBlurImage)
#endif
        proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d a p t i v e S h a r p e n I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveSharpenImage() adaptively sharpens the image by
sharpening more
% intensely near image edges and less intensely far from edges. We sharpen the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
% The format of the AdaptiveSharpenImage method is:
%
%     Image *AdaptiveSharpenImage(const Image *image,const double radius,
%       const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o radius: the radius of the Gaussian, in pixels, not counting the center
%     pixel.
%
%   o sigma: the standard deviation of the Laplacian, in pixels.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *sharp_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *sharp_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sharp_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (sharp_image == (Image *) NULL)
    return((Image *) NULL);
  /* A (near) zero sigma is a no-op: return the unmodified clone. */
  if (fabs(sigma) < MagickEpsilon)
    return(sharp_image);
  if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, sharp, and level again.
    The (auto-leveled) edge image later selects, per pixel, how large a
    sharpening kernel to apply: strong edges get the largest kernel.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  /* Blur failure is tolerated: fall back to the un-blurred edge image. */
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.  Only even
    indices of kernel[] are populated; odd selector values are rounded down to
    even below, so odd slots are never dereferenced.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        /* Negative Gaussian ring with a strong positive center: a
           sharpening (unsharp-like) kernel. */
        kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    kernel[i][(k-1)/2]=(double) ((-2.0)*normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /* Partial allocation failure: release every kernel acquired so far. */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively sharpen image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,sharp_image,sharp_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sharp_image->rows; y++)
  {
    register const Quantum
      *magick_restrict r;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) sharp_image->columns; x++)
    {
      register const Quantum
        *magick_restrict p;

      register ssize_t
        i;

      ssize_t
        center,
        j;

      /*
        Map edge intensity to a kernel selector j: bright edge pixels give
        small j (large kernel width-j); j is forced even so kernel[j] is one
        of the populated slots.
      */
      j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5);
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      /* Offset of the neighborhood's center pixel within p. */
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          sharp_traits,
          traits;

        register const double
          *magick_restrict k;  /* note: shadows the outer ssize_t k */

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        sharp_traits=GetPixelChannelTraits(sharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (sharp_traits == UndefinedPixelTrait))
          continue;
        if (((sharp_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p+center) == 0))
          {
            /* Channel is copy-only or masked out: pass it through. */
            SetPixelChannel(sharp_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((sharp_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            /* gamma renormalizes for virtual-pixel/edge effects. */
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each sample by its (scaled) alpha.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(sharp_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveSharpenImage)
#endif
        proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sharp_image->type=image->type;
  sharp_view=DestroyCacheView(sharp_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    sharp_image=DestroyImage(sharp_image);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l u r   I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlurImage() blurs an image.
We convolve the image with a Gaussian operator % of the given radius and standard deviation (sigma). For reasonable results, % the radius should be larger than sigma. Use a radius of 0 and BlurImage() % selects a suitable radius for you. % % The format of the BlurImage method is: % % Image *BlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *BlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { char geometry[MagickPathExtent]; KernelInfo *kernel_info; Image *blur_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateBlurImage(image,radius,sigma,exception); if (blur_image != (Image *) NULL) return(blur_image); #endif (void) FormatLocaleString(geometry,MagickPathExtent, "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); blur_image=ConvolveImage(image,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n v o l v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvolveImage() applies a custom convolution kernel 
to the image. % % The format of the ConvolveImage method is: % % Image *ConvolveImage(const Image *image,const KernelInfo *kernel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o kernel: the filtering kernel. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ConvolveImage(const Image *image, const KernelInfo *kernel_info,ExceptionInfo *exception) { Image *convolve_image; #if defined(MAGICKCORE_OPENCL_SUPPORT) convolve_image=AccelerateConvolveImage(image,kernel_info,exception); if (convolve_image != (Image *) NULL) return(convolve_image); #endif convolve_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info, exception); return(convolve_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s p e c k l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DespeckleImage() reduces the speckle noise in an image while perserving the % edges of the original image. A speckle removing filter uses a complementary % hulling technique (raising pixels that are darker than their surrounding % neighbors, then complementarily lowering pixels that are brighter than their % surrounding neighbors) to reduce the speckle index of that image (reference % Crimmins speckle removal). % % The format of the DespeckleImage method is: % % Image *DespeckleImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
%
*/

/*
  Hull() performs one directional pass of the Crimmins hulling step over the
  padded working buffers f and g (each (columns+2) x (rows+2), one-pixel
  border).  With polarity > 0, pixels darker than their (x_offset,y_offset)
  neighbor are raised by one scaled unit; with polarity <= 0, brighter pixels
  are lowered.  The second loop applies the complementary comparison against
  both the forward and backward neighbor.  Results are written in place into
  the f/g buffers; callers alternate passes over all eight directions.
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  register Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /* Skip the one-row top padding; r points at the offset neighbor. */
  p=f+(columns+2);
  q=g+(columns+2);
  r=p+(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickRealType
      v;

    register ssize_t
      i,
      x;

    /* i indexes row y inside the padded buffer (one-pixel side margins). */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /* Second (complementary) pass: compare against both neighbors r and s. */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*(columns+2)+x_offset);
  s=q-(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    register ssize_t
      i,
      x;

    MagickRealType
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}

MagickExport Image *DespeckleImage(const Image
*image,ExceptionInfo *exception)
{
#define DespeckleImageTag  "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  register ssize_t
    i;

  size_t
    length;

  /* The four hulling directions; each is applied forward and backward with
     both polarities below. */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  despeckle_image=AccelerateDespeckleImage(image,exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(despeckle_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer.  Both buffers carry a one-pixel border all around
    (hence the +2 in each dimension) so Hull() can read neighbors safely.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image.  Channels are processed one at a time
    through the shared scratch buffers, so this loop is intentionally serial.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel
      channel;

    PixelTrait
      despeckle_traits,
      traits;

    register ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    channel=GetPixelChannelChannel(image,i);
    traits=GetPixelChannelTraits(image,channel);
    despeckle_traits=GetPixelChannelTraits(despeckle_image,channel);
    if ((traits == UndefinedPixelTrait) ||
        (despeckle_traits == UndefinedPixelTrait))
      continue;
    if ((despeckle_traits & CopyPixelTrait) != 0)
      continue;
    /* Stage channel i into the padded scratch buffer; j tracks the padded
       index (the j++ before/after each row skips the side margins). */
    (void) ResetMagickMemory(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[j++]=p[i];
        p+=GetPixelChannels(image);
      }
      j++;
    }
    (void) ResetMagickMemory(buffer,0,length*sizeof(*buffer));
    /* Crimmins: hull each direction forward/backward with both polarities. */
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /* Copy the despeckled channel back out of the padded buffer. */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelChannel(despeckle_image,channel,pixels[j++],q);
        q+=GetPixelChannels(despeckle_image);
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E d g e   I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EdgeImage() finds edges in an image. Radius defines the radius of the
% convolution filter. Use a radius of 0 and EdgeImage() selects a suitable
% radius for you.
%
% The format of the EdgeImage method is:
%
%     Image *EdgeImage(const Image *image,const double radius,
%       ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o radius: the radius of the pixel neighborhood.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /* NOTE(review): zeroing the freshly acquired KernelInfo and rebuilding it
     by hand (including the signature) looks intentional here — the NULL
     geometry yields an empty kernel — but confirm AcquireKernelInfo(NULL)
     does not allocate internals that this wipe would leak. */
  (void) ResetMagickMemory(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Laplacian-style kernel: all -1.0 with a strong positive center so the
    weights sum to zero (i/2 is the center index after the loop since the
    element count width*width is odd).
  */
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]=(-1.0);
  kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0;
  edge_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E m b o s s   I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EmbossImage() returns a grayscale image with a three-dimensional effect.
% We convolve the image with a Gaussian operator of the given radius and
% standard deviation (sigma).
For reasonable results, radius should be
% larger than sigma. Use a radius of 0 and Emboss() selects a suitable
% radius for you.
%
% The format of the EmbossImage method is:
%
%     Image *EmbossImage(const Image *image,const double radius,
%       const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o radius: the radius of the pixel neighborhood.
%
%   o sigma: the standard deviation of the Gaussian, in pixels.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->width*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Build a directional Gaussian kernel: positive weights on one side of the
    anti-diagonal, negative on the other; k-- keeps only the u==k diagonal
    element of each row non-zero.
  */
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ? -8.0 :
        8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /* Normalize so the kernel weights sum to 1 (PerceptibleReciprocal guards
     against a zero sum). */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image,exception);
  return(emboss_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G a u s s i a n B l u r I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussianBlurImage() blurs an image. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, the radius should be larger than sigma. Use a
% radius of 0 and GaussianBlurImage() selects a suitable radius for you
%
% The format of the GaussianBlurImage method is:
%
%     Image *GaussianBlurImage(const Image *image,const double radius,
%       const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o radius: the radius of the Gaussian, in pixels, not counting the center
%     pixel.
%
%   o sigma: the standard deviation of the Gaussian, in pixels.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    kernel_geometry[MagickPathExtent];

  Image
    *blur_image;

  KernelInfo
    *gaussian_kernel;

  /*
    Validate arguments.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Describe a 2-D Gaussian kernel of the requested radius/sigma and hand
    the actual work to ConvolveImage().
  */
  (void) FormatLocaleString(kernel_geometry,MagickPathExtent,
    "gaussian:%.20gx%.20g",radius,sigma);
  gaussian_kernel=AcquireKernelInfo(kernel_geometry,exception);
  if (gaussian_kernel == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,gaussian_kernel,exception);
  gaussian_kernel=DestroyKernelInfo(gaussian_kernel);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     K u w a h a r a   I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KuwaharaImage() is an edge preserving noise reduction filter.
%
% The format of the KuwaharaImage method is:
%
%     Image *KuwaharaImage(const Image *image,const double radius,
%       const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o radius: the square window radius.
%
%   o sigma: the standard deviation of the Gaussian, in pixels.
%
%   o exception: return any errors or warnings in this structure.
%
*/

/*
  GetMeanLuma() returns the Rec. 709 luma of a pixel given as an array of
  per-channel doubles (indexed through the image's channel map).
*/
static inline MagickRealType GetMeanLuma(const Image *magick_restrict image,
  const double *magick_restrict pixel)
{
  return(0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+
    0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+
    0.072186f*pixel[image->channel_map[BluePixelChannel].offset]);  /* Rec709 */
}

MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define KuwaharaImageTag  "Kuwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=(size_t) radius+1;
  /* Pre-smooth the image; quadrant statistics are computed on this copy. */
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse)
    {
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,kuwahara_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) gaussian_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gaussian_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      double
        min_variance;

      RectangleInfo
        quadrant,
        target;

      register size_t
        i;

      /*
        Examine the four width x width quadrants that meet at (x,y) and keep
        the one with the smallest luma variance.
      */
      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      for (i=0; i < 4; i++)
      {
        const Quantum
          *magick_restrict k;

        double
          mean[MaxPixelChannels],
          variance;

        register ssize_t
          n;

        ssize_t
          j;

        quadrant.x=x;
        quadrant.y=y;
        switch (i)
        {
          case 0:
          {
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          case 3:
          default:
            break;
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const Quantum *) NULL)
          break;
        /* Per-channel mean over the quadrant. */
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]=0.0;
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
            mean[j]+=(double) k[j];
          k+=GetPixelChannels(gaussian_image);
        }
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]/=(double) (width*width);
        /* Luma variance of the quadrant around its mean luma. */
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(gaussian_image,k);
          variance+=(luma-GetMeanLuma(gaussian_image,mean))*
            (luma-GetMeanLuma(gaussian_image,mean));
          k+=GetPixelChannels(gaussian_image);
        }
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      /* i < 4 means a quadrant read failed above. */
      if (i < 4)
        {
          status=MagickFalse;
          break;
        }
      /* Write the smoothest quadrant's center value to the output pixel. */
      status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double)
        target.y+target.height/2.0,q,exception);
      q+=GetPixelChannels(kuwahara_image);
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_KuwaharaImage)
#endif
        proceed=SetImageProgress(image,KuwaharaImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L o c a l C o n t r a s t I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LocalContrastImage() attempts to increase the appearance of large-scale
% light-dark transitions. Local contrast enhancement works similarly to
% sharpening with an unsharp mask, however the mask is instead created using
% an image with a greater blur distance.
%
% The format of the LocalContrastImage method is:
%
%     Image *LocalContrastImage(const Image *image, const double radius,
%       const double strength,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o radius: the radius of the Gaussian blur, in percentage with 100%
%     resulting in a blur radius of 20% of largest dimension.
%
%   o strength: the strength of the blur mask in percentage.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LocalContrastImage(const Image *image,const double radius,
  const double strength,ExceptionInfo *exception)
{
#define LocalContrastImageTag  "LocalContrast/Image"

  CacheView
    *image_view,
    *contrast_view;

  float
    *interImage,
    *scanLinePixels,
    totalWeight;

  Image
    *contrast_image;

  MagickBooleanType
    status;

  MemoryInfo
    *scanLinePixels_info,
    *interImage_info;

  ssize_t
    scanLineSize,
    width;

  /*
    Initialize contrast image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception);
  if (contrast_image != (Image *) NULL)
    return(contrast_image);
#endif
  contrast_image=CloneImage(image,0,0,MagickTrue,exception);
  if (contrast_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse)
    {
      contrast_image=DestroyImage(contrast_image);
      return((Image *) NULL);
    }
  image_view=AcquireVirtualCacheView(image,exception);
  contrast_view=AcquireAuthenticCacheView(contrast_image,exception);
  /*
    The blur half-width is radius% of 0.2 * the largest dimension; scan lines
    carry `width` pixels of padding at each end.  One scan line is allocated
    per potential OpenMP thread.
  */
  scanLineSize=(ssize_t) MagickMax(image->columns,image->rows);
  width=(ssize_t) scanLineSize*0.002f*fabs(radius);
  scanLineSize+=(2*width);
  scanLinePixels_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()*
    scanLineSize,sizeof(*scanLinePixels));
  if (scanLinePixels_info == (MemoryInfo *) NULL)
    {
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  scanLinePixels=(float *) GetVirtualMemoryBlob(scanLinePixels_info);
  /*
    Create intermediate buffer: rows x (columns + 2*width) of vertically
    blurred luma values.
  */
  interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
    sizeof(*interImage));
  if (interImage_info == (MemoryInfo *) NULL)
    {
      scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  interImage=(float *) GetVirtualMemoryBlob(interImage_info);
  totalWeight=(float) ((width+1)*(width+1));
  /*
    Vertical pass: triangle-weighted blur of each column's luma into
    interImage.
  */
  status=MagickTrue;
  {
    ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) \
      magick_threads(image,image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *out,
        *pix,
        *pixels;

      register ssize_t
        y;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      /* Each thread uses its own scan-line slice. */
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      pix=pixels;
      p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (y=0; y < (ssize_t) image->rows+(2*width); y++)
      {
        *pix++=(float)GetPixelLuma(image,p);
        p+=image->number_channels;
      }
      out=interImage+x+width;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        float
          sum,
          weight;

        /* Triangle filter: weights ramp 1..width, then width-1..1; the
           center sample is intentionally skipped (i jumps width -> width+1). */
        weight=1.0f;
        sum=0;
        pix=pixels+y;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* write to output */
        *out=sum/totalWeight;
        /* mirror into padding */
        if (x <= width && x != 0)
          *(out-(x*2))=*out;
        if ((x > (ssize_t) image->columns-width-2) &&
            (x != (ssize_t) image->columns-1))
          *(out+((image->columns-x-1)*2))=*out;
        out+=image->columns+(width*2);
      }
    }
  }
  /*
    Horizontal pass: blur interImage rows, then scale each channel by the
    ratio of the original luma to the blurred luma (the unsharp-like mask).
  */
  {
    ssize_t
      y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) \
      magick_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *pix,
        *pixels;

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1,
        exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+
        (2*width))*sizeof(float));
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float
          mult,
          srcVal,
          sum,
          weight;

        weight=1.0f;
        sum=0;
        pix=pixels+x;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* Apply and write */
        /* NOTE(review): srcVal of 0 (pure black) makes mult non-finite;
           ClampToQuantum bounds the result, but confirm this matches the
           accelerated (OpenCL) path's behavior. */
        srcVal=(float) GetPixelLuma(image,p);
        mult=(srcVal-(sum/totalWeight))*(strength/100.0f);
        mult=(srcVal+mult)/srcVal;
        SetPixelRed(contrast_image,ClampToQuantum(GetPixelRed(image,p)*mult),
          q);
        SetPixelGreen(contrast_image,ClampToQuantum(GetPixelGreen(image,p)*
          mult),q);
        SetPixelBlue(contrast_image,ClampToQuantum(GetPixelBlue(image,p)*mult),
          q);
        p+=image->number_channels;
        q+=contrast_image->number_channels;
      }
      if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  }
  scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
  interImage_info=RelinquishVirtualMemory(interImage_info);
  contrast_view=DestroyCacheView(contrast_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    contrast_image=DestroyImage(contrast_image);
  return(contrast_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o t i o n   B l u r   I m
a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MotionBlurImage() simulates motion blur.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and MotionBlurImage() selects a suitable radius for you.
%  Angle gives the angle of the blurring motion.
%
%  Andrew Protano contributed this effect.
%
%  The format of the MotionBlurImage method is:
%
%    Image *MotionBlurImage(const Image *image,const double radius,
%      const double sigma,const double angle,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting
%      the center pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o angle: Apply the effect along this angle.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Build a normalized 1-D Gaussian convolution kernel of `width' taps.
  Returns NULL on allocation failure (caller must check).
*/
static MagickRealType *GetMotionBlurKernel(const size_t width,
  const double sigma)
{
  MagickRealType
    *kernel,
    normalize;

  register ssize_t
    i;

  /*
    Generate a 1-D convolution kernel.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    return(kernel);
  normalize=0.0;
  for (i=0; i < (ssize_t) width; i++)
  {
    kernel[i]=(MagickRealType) (exp((-((double) i*i)/(double) (2.0*MagickSigma*
      MagickSigma)))/(MagickSQ2PI*MagickSigma));
    normalize+=kernel[i];
  }
  /* Normalize so the taps sum to 1 (preserves overall intensity). */
  for (i=0; i < (ssize_t) width; i++)
    kernel[i]/=normalize;
  return(kernel);
}

MagickExport Image *MotionBlurImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
#define BlurImageTag  "Blur/Image"

  CacheView
    *blur_view,
    *image_view,
    *motion_view;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  OffsetInfo
    *offset;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetMotionBlurKernel(width,sigma);
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
  if (offset == (OffsetInfo *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Precompute the per-tap sampling offsets along the motion direction.
  */
  point.x=(double) width*sin(DegreesToRadians(angle));
  point.y=(double) width*cos(DegreesToRadians(angle));
  for (i=0; i < (ssize_t) width; i++)
  {
    offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5);
    offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5);
  }
  /*
    Motion blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  motion_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const Quantum
          *magick_restrict r;

        register MagickRealType
          *magick_restrict k;

        register ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if (((blur_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) == 0))
          {
            /* Channel is copy-only or pixel is write-masked: pass through. */
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /* Non-blended channel: plain weighted sum along the motion path. */
            for (j=0; j < (ssize_t) width; j++)
            {
              r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+
                offset[j].y,1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              pixel+=(*k)*r[i];
              k++;
            }
            SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /* Blended channel: alpha-weighted sum, normalized by gamma. */
        alpha=0.0;
        gamma=0.0;
        for (j=0; j < (ssize_t) width; j++)
        {
          r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1,
            1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) (QuantumScale*GetPixelAlpha(image,r));
          pixel+=(*k)*alpha*r[i];
          gamma+=(*k)*alpha;
          k++;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MotionBlurImage)
#endif
        proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  motion_view=DestroyCacheView(motion_view);
  image_view=DestroyCacheView(image_view);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  offset=(OffsetInfo *) RelinquishMagickMemory(offset);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P r e v i e w   I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PreviewImage() tiles 9 thumbnails of the specified image with an image
%  processing operation applied with varying parameters.  This may be helpful
%  pin-pointing an appropriate parameter for a particular image processing
%  operation.
% % The format of the PreviewImages method is: % % Image *PreviewImages(const Image *image,const PreviewType preview, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o preview: the image processing operation. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *PreviewImage(const Image *image,const PreviewType preview, ExceptionInfo *exception) { #define NumberTiles 9 #define PreviewImageTag "Preview/Image" #define DefaultPreviewGeometry "204x204+10+10" char factor[MagickPathExtent], label[MagickPathExtent]; double degrees, gamma, percentage, radius, sigma, threshold; extern const char DefaultTileFrame[]; Image *images, *montage_image, *preview_image, *thumbnail; ImageInfo *preview_info; MagickBooleanType proceed; MontageInfo *montage_info; QuantizeInfo quantize_info; RectangleInfo geometry; register ssize_t i, x; size_t colors; ssize_t y; /* Open output image file. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); colors=2; degrees=0.0; gamma=(-0.2f); preview_info=AcquireImageInfo(); SetGeometry(image,&geometry); (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y, &geometry.width,&geometry.height); images=NewImageList(); percentage=12.5; GetQuantizeInfo(&quantize_info); radius=0.0; sigma=1.0; threshold=0.0; x=0; y=0; for (i=0; i < NumberTiles; i++) { thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception); if (thumbnail == (Image *) NULL) break; (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL, (void *) NULL); (void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception); if (i == (NumberTiles/2)) { (void) QueryColorCompliance("#dfdfdf",AllCompliance, &thumbnail->matte_color,exception); AppendImageToList(&images,thumbnail); continue; } switch (preview) { case 
RotatePreview: { degrees+=45.0; preview_image=RotateImage(thumbnail,degrees,exception); (void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees); break; } case ShearPreview: { degrees+=5.0; preview_image=ShearImage(thumbnail,degrees,degrees,exception); (void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees, 2.0*degrees); break; } case RollPreview: { x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles; y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles; preview_image=RollImage(thumbnail,x,y,exception); (void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g", (double) x,(double) y); break; } case HuePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0* percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } case SaturationPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0*percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } case BrightnessPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } case GammaPreview: default: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; gamma+=0.4f; (void) GammaImage(preview_image,gamma,exception); (void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma); break; } case SpiffPreview: { 
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image != (Image *) NULL) for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickTrue,exception); (void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)", (double) i+1); break; } case DullPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickFalse,exception); (void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)", (double) i+1); break; } case GrayscalePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; quantize_info.colorspace=GRAYColorspace; (void) QuantizeImage(&quantize_info,preview_image,exception); (void) FormatLocaleString(label,MagickPathExtent, "-colorspace gray -colors %.20g",(double) colors); break; } case QuantizePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; (void) QuantizeImage(&quantize_info,preview_image,exception); (void) FormatLocaleString(label,MagickPathExtent,"colors %.20g",(double) colors); break; } case DespecklePreview: { for (x=0; x < (i-1); x++) { preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; thumbnail=DestroyImage(thumbnail); thumbnail=preview_image; } preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)", (double) i+1); break; } case ReduceNoisePreview: { preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) radius, (size_t) radius,exception); (void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius); break; } case AddNoisePreview: { switch ((int) i) { case 0: { (void) 
CopyMagickString(factor,"uniform",MagickPathExtent); break; } case 1: { (void) CopyMagickString(factor,"gaussian",MagickPathExtent); break; } case 2: { (void) CopyMagickString(factor,"multiplicative",MagickPathExtent); break; } case 3: { (void) CopyMagickString(factor,"impulse",MagickPathExtent); break; } case 5: { (void) CopyMagickString(factor,"laplacian",MagickPathExtent); break; } case 6: { (void) CopyMagickString(factor,"Poisson",MagickPathExtent); break; } default: { (void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent); break; } } preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i, (size_t) i,exception); (void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor); break; } case SharpenPreview: { preview_image=SharpenImage(thumbnail,radius,sigma,exception); (void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g",radius, sigma); break; } case BlurPreview: { preview_image=BlurImage(thumbnail,radius,sigma,exception); (void) FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius, sigma); break; } case ThresholdPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) BilevelImage(thumbnail,(double) (percentage*((double) QuantumRange+1.0))/100.0,exception); (void) FormatLocaleString(label,MagickPathExtent,"threshold %g",(double) (percentage*((double) QuantumRange+1.0))/100.0); break; } case EdgeDetectPreview: { preview_image=EdgeImage(thumbnail,radius,exception); (void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius); break; } case SpreadPreview: { preview_image=SpreadImage(thumbnail,image->interpolate,radius, exception); (void) FormatLocaleString(label,MagickPathExtent,"spread %g", radius+0.5); break; } case SolarizePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) SolarizeImage(preview_image,(double) QuantumRange*percentage/ 100.0,exception); (void) 
FormatLocaleString(label,MagickPathExtent,"solarize %g", (QuantumRange*percentage)/100.0); break; } case ShadePreview: { degrees+=10.0; preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees, exception); (void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees, degrees); break; } case RaisePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; geometry.width=(size_t) (2*i+2); geometry.height=(size_t) (2*i+2); geometry.x=(i-1)/2; geometry.y=(i-1)/2; (void) RaiseImage(preview_image,&geometry,MagickTrue,exception); (void) FormatLocaleString(label,MagickPathExtent, "raise %.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double) geometry.height,(double) geometry.x,(double) geometry.y); break; } case SegmentPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; threshold+=0.4f; (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold, threshold,exception); (void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g", threshold,threshold); break; } case SwirlPreview: { preview_image=SwirlImage(thumbnail,degrees,image->interpolate, exception); (void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees); degrees+=45.0; break; } case ImplodePreview: { degrees+=0.1f; preview_image=ImplodeImage(thumbnail,degrees,image->interpolate, exception); (void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees); break; } case WavePreview: { degrees+=5.0f; preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees, image->interpolate,exception); (void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5*degrees, 2.0*degrees); break; } case OilPaintPreview: { preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma, exception); (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",radius, sigma); break; } case CharcoalDrawingPreview: { 
preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma, exception); (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",radius, sigma); break; } case JPEGPreview: { char filename[MagickPathExtent]; int file; MagickBooleanType status; preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; preview_info->quality=(size_t) percentage; (void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double) preview_info->quality); file=AcquireUniqueFileResource(filename); if (file != -1) file=close(file)-1; (void) FormatLocaleString(preview_image->filename,MagickPathExtent, "jpeg:%s",filename); status=WriteImage(preview_info,preview_image,exception); if (status != MagickFalse) { Image *quality_image; (void) CopyMagickString(preview_info->filename, preview_image->filename,MagickPathExtent); quality_image=ReadImage(preview_info,exception); if (quality_image != (Image *) NULL) { preview_image=DestroyImage(preview_image); preview_image=quality_image; } } (void) RelinquishUniqueFileResource(preview_image->filename); if ((GetBlobSize(preview_image)/1024) >= 1024) (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ", factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/ 1024.0/1024.0); else if (GetBlobSize(preview_image) >= 1024) (void) FormatLocaleString(label,MagickPathExtent, "quality %s\n%gkb ",factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/1024.0); else (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%.20gb ", factor,(double) ((MagickOffsetType) GetBlobSize(thumbnail))); break; } } thumbnail=DestroyImage(thumbnail); percentage+=12.5; radius+=0.5; sigma+=0.25; if (preview_image == (Image *) NULL) break; (void) DeleteImageProperty(preview_image,"label"); (void) SetImageProperty(preview_image,"label",label,exception); AppendImageToList(&images,preview_image); proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i, NumberTiles); if 
(proceed == MagickFalse) break; } if (images == (Image *) NULL) { preview_info=DestroyImageInfo(preview_info); return((Image *) NULL); } /* Create the montage. */ montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL); (void) CopyMagickString(montage_info->filename,image->filename,MagickPathExtent); montage_info->shadow=MagickTrue; (void) CloneString(&montage_info->tile,"3x3"); (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry); (void) CloneString(&montage_info->frame,DefaultTileFrame); montage_image=MontageImages(images,montage_info,exception); montage_info=DestroyMontageInfo(montage_info); images=DestroyImageList(images); if (montage_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); if (montage_image->montage != (char *) NULL) { /* Free image directory. */ montage_image->montage=(char *) RelinquishMagickMemory( montage_image->montage); if (image->directory != (char *) NULL) montage_image->directory=(char *) RelinquishMagickMemory( montage_image->directory); } preview_info=DestroyImageInfo(preview_info); return(montage_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R o t a t i o n a l B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RotationalBlurImage() applies a radial blur to the image. % % Andrew Protano contributed this effect. % % The format of the RotationalBlurImage method is: % % Image *RotationalBlurImage(const Image *image,const double angle, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o angle: the angle of the radial blur. % % o blur: the blur. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *RotationalBlurImage(const Image *image,const double angle, ExceptionInfo *exception) { CacheView *blur_view, *image_view, *radial_view; double blur_radius, *cos_theta, offset, *sin_theta, theta; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; PointInfo blur_center; register ssize_t i; size_t n; ssize_t y; /* Allocate blur image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateRotationalBlurImage(image,angle,exception); if (blur_image != (Image *) NULL) return(blur_image); #endif blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } blur_center.x=(double) (image->columns-1)/2.0; blur_center.y=(double) (image->rows-1)/2.0; blur_radius=hypot(blur_center.x,blur_center.y); n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL); theta=DegreesToRadians(angle)/(double) (n-1); cos_theta=(double *) AcquireQuantumMemory((size_t) n, sizeof(*cos_theta)); sin_theta=(double *) AcquireQuantumMemory((size_t) n, sizeof(*sin_theta)); if ((cos_theta == (double *) NULL) || (sin_theta == (double *) NULL)) { blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } offset=theta*(double) (n-1)/2.0; for (i=0; i < (ssize_t) n; i++) { cos_theta[i]=cos((double) (theta*i-offset)); sin_theta[i]=sin((double) (theta*i-offset)); } /* Radial blur image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); radial_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double radius; PointInfo center; register ssize_t i; size_t step; center.x=(double) x-blur_center.x; center.y=(double) y-blur_center.y; radius=hypot((double) center.x,center.y); if (radius == 0) step=1; else { step=(size_t) (blur_radius/radius); if (step == 0) step=1; else if (step >= n) step=n-1; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const Quantum *magick_restrict r; register ssize_t j; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if (((blur_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p) == 0)) { SetPixelChannel(blur_image,channel,p[i],q); continue; } gamma=0.0; pixel=0.0; if ((GetPixelChannelTraits(image,AlphaPixelChannel) == UndefinedPixelTrait) || (channel == AlphaPixelChannel)) { for (j=0; j < (ssize_t) n; j+=(ssize_t) step) { r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+ 
center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t) (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5), 1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel+=r[i]; gamma++; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } for (j=0; j < (ssize_t) n; j+=(ssize_t) step) { double alpha; r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+ center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t) (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5), 1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } alpha=(double) QuantumScale*GetPixelAlpha(image,r); pixel+=alpha*r[i]; gamma+=alpha; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RotationalBlurImage) #endif proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); radial_view=DestroyCacheView(radial_view); image_view=DestroyCacheView(image_view); cos_theta=(double *) RelinquishMagickMemory(cos_theta); sin_theta=(double *) RelinquishMagickMemory(sin_theta); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e l e c t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SelectiveBlurImage() selectively blur pixels within a contrast threshold. 
% It is similar to the unsharpen mask that sharpens everything with contrast % above a certain threshold. % % The format of the SelectiveBlurImage method is: % % Image *SelectiveBlurImage(const Image *image,const double radius, % const double sigma,const double threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o threshold: only pixels within this contrast threshold are included % in the blur operation. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SelectiveBlurImage(const Image *image,const double radius, const double sigma,const double threshold,ExceptionInfo *exception) { #define SelectiveBlurImageTag "SelectiveBlur/Image" CacheView *blur_view, *image_view, *luminance_view; Image *blur_image, *luminance_image; MagickBooleanType status; MagickOffsetType progress; MagickRealType *kernel; register ssize_t i; size_t width; ssize_t center, j, u, v, y; /* Initialize blur image attributes. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth1D(radius,sigma); kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,width*sizeof(*kernel))); if (kernel == (MagickRealType *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); j=(ssize_t) (width-1)/2; i=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); } if (image->debug != MagickFalse) { char format[MagickPathExtent], *message; register const MagickRealType *k; ssize_t u, v; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double) width); message=AcquireString(""); k=kernel; for (v=0; v < (ssize_t) width; v++) { *message='\0'; (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for (u=0; u < (ssize_t) width; u++) { (void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double) *k++); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); return((Image *) NULL); } luminance_image=CloneImage(image,0,0,MagickTrue,exception); if (luminance_image == (Image *) NULL) { blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) 
RelinquishAlignedMemory(kernel); return((Image *) NULL); } status=TransformImageColorspace(luminance_image,GRAYColorspace,exception); if (status == MagickFalse) { luminance_image=DestroyImage(luminance_image); blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); return((Image *) NULL); } /* Threshold blur image. */ status=MagickTrue; progress=0; center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)* ((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L)); image_view=AcquireVirtualCacheView(image,exception); luminance_view=AcquireVirtualCacheView(luminance_image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double contrast; MagickBooleanType sync; register const Quantum *magick_restrict l, *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t) ((width-1)/2L),image->columns+width,width,exception); l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y- (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double intensity; register ssize_t i; intensity=GetPixelIntensity(image,p+center); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict luminance_pixels, *magick_restrict pixels; register ssize_t u; ssize_t v; 
channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if (((blur_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p+center) == 0)) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } k=kernel; pixel=0.0; pixels=p; luminance_pixels=l; gamma=0.0; if ((blur_traits & BlendPixelTrait) == 0) { for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,luminance_pixels)- intensity; if (fabs(contrast) < threshold) { pixel+=(*k)*pixels[i]; gamma+=(*k); } k++; pixels+=GetPixelChannels(image); luminance_pixels+=GetPixelChannels(luminance_image); } pixels+=GetPixelChannels(image)*image->columns; luminance_pixels+=GetPixelChannels(luminance_image)* luminance_image->columns; } if (fabs((double) gamma) < MagickEpsilon) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(image,pixels)-intensity; if (fabs(contrast) < threshold) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=(*k)*alpha*pixels[i]; gamma+=(*k)*alpha; } k++; pixels+=GetPixelChannels(image); luminance_pixels+=GetPixelChannels(luminance_image); } pixels+=GetPixelChannels(image)*image->columns; luminance_pixels+=GetPixelChannels(luminance_image)* luminance_image->columns; } if (fabs((double) gamma) < MagickEpsilon) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); l+=GetPixelChannels(luminance_image); q+=GetPixelChannels(blur_image); } 
sync=SyncCacheViewAuthenticPixels(blur_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SelectiveBlurImage) #endif proceed=SetImageProgress(image,SelectiveBlurImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); image_view=DestroyCacheView(image_view); luminance_image=DestroyImage(luminance_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShadeImage() shines a distant light on an image to create a % three-dimensional effect. You control the positioning of the light with % azimuth and elevation; azimuth is measured in degrees off the x axis % and elevation is measured in pixels above the Z axis. % % The format of the ShadeImage method is: % % Image *ShadeImage(const Image *image,const MagickBooleanType gray, % const double azimuth,const double elevation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o gray: A value other than zero shades the intensity of each pixel. % % o azimuth, elevation: Define the light source direction. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define ShadeImageTag  "Shade/Image"

  CacheView
    *image_view,
    *shade_view;

  Image
    *linear_image,
    *shade_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    light;

  ssize_t
    y;

  /*
    Initialize shaded image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* linear_image is read, shade_image receives the shaded result. */
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector from the spherical (azimuth, elevation) angles.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    double
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;

    register const Quantum
      *magick_restrict center,
      *magick_restrict p,
      *magick_restrict post,
      *magick_restrict pre;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Fetch a 3-row window (y-1..y+1) with one extra column on each side. */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange;  /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine the surface normal and compute shading.  pre/center/post
        point at the pixel above, at, and below (x,y) in the 3-row window.
      */
      pre=p+GetPixelChannels(linear_image);
      center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image);
      post=center+(linear_image->columns+2)*GetPixelChannels(linear_image);
      /* x-gradient: left column of the 3x3 neighborhood minus right column. */
      normal.x=(double) (
        GetPixelIntensity(linear_image,pre-GetPixelChannels(linear_image))+
        GetPixelIntensity(linear_image,center-GetPixelChannels(linear_image))+
        GetPixelIntensity(linear_image,post-GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,pre+GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,center+GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,post+GetPixelChannels(linear_image)));
      /* y-gradient: bottom row minus top row. */
      normal.y=(double) (
        GetPixelIntensity(linear_image,post-GetPixelChannels(linear_image))+
        GetPixelIntensity(linear_image,post)+
        GetPixelIntensity(linear_image,post+GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,pre-GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,pre)-
        GetPixelIntensity(linear_image,pre+GetPixelChannels(linear_image)));
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;  /* flat surface: shade is the light's z-component */
      else
        {
          shade=0.0;
          /* shade = (normal . light) / |normal|; 0 if facing away. */
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=normal.x*normal.x+normal.y*normal.y+
                normal.z*normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          shade_traits,
          traits;

        channel=GetPixelChannelChannel(linear_image,i);
        traits=GetPixelChannelTraits(linear_image,channel);
        shade_traits=GetPixelChannelTraits(shade_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (shade_traits == UndefinedPixelTrait))
          continue;
        /* Copy-only channels and masked pixels pass through unchanged. */
        if (((shade_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(linear_image,center) == 0))
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if ((traits & UpdatePixelTrait) == 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if (gray != MagickFalse)
          {
            /* gray != 0: write the raw shade, ignoring the source value. */
            SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q);
            continue;
          }
        SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade*
          center[i]),q);
      }
      p+=GetPixelChannels(linear_image);
      q+=GetPixelChannels(shade_image);
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ShadeImage)
#endif
        proceed=SetImageProgress(image,ShadeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a r p e n I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SharpenImage() sharpens the image.  We convolve the image with a Gaussian
%  operator of the given radius and standard deviation (sigma).  For
%  reasonable results, radius should be larger than sigma.  Use a radius of 0
%  and SharpenImage() selects a suitable radius for you.
%
%  Using a separable kernel would be faster, but the negative weights cancel
%  out on the corners of the kernel producing often undesirable ringing in the
%  filtered result; this can be avoided by using a 2D gaussian shaped image
%  sharpening kernel instead.
%
%  The format of the SharpenImage method is:
%
%    Image *SharpenImage(const Image *image,const double radius,
%      const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *sharp_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Fill the kernel with a negated 2D Gaussian, then overwrite the center
    weight so the kernel sums to a sharpening (difference) operator.
  */
  normalize=0.0;
  j=(ssize_t) (kernel_info->width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0*
        MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      normalize+=kernel_info->values[i];
      i++;
    }
  }
  kernel_info->values[i/2]=(double) ((-2.0)*normalize);
  /* Normalize the kernel so its weights sum to 1. */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  sharp_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S p r e a d I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpreadImage() is a special effects method that randomly displaces each
%  pixel in a square area defined by the radius parameter.
%
%  The format of the SpreadImage method is:
%
%      Image *SpreadImage(const Image *image,
%        const PixelInterpolateMethod method,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method: interpolation method.
%
%    o radius: choose a random pixel in a neighborhood of this extent.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,
  const PixelInterpolateMethod method,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag  "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  size_t
    width;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  spread_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse)
    {
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image.
  */
  status=MagickTrue;
  progress=0;
  width=GetOptimalKernelWidth1D(radius,0.5);
  random_info=AcquireRandomInfoThreadSet();  /* one generator per thread */
  image_view=AcquireVirtualCacheView(image,exception);
  spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* Only parallelize when the RNG is unseeded; keeps seeded runs repeatable. */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,spread_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PointInfo
        point;

      /* Sample from a random offset within +/- width/2 of (x,y). */
      point.x=GetPseudoRandomValue(random_info[id]);
      point.y=GetPseudoRandomValue(random_info[id]);
      status=InterpolatePixelChannels(image,image_view,spread_image,method,
        (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q,
        exception);
      q+=GetPixelChannels(spread_image);
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SpreadImage)
#endif
        proceed=SetImageProgress(image,SpreadImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    spread_image=DestroyImage(spread_image);
  return(spread_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     U n s h a r p M a s k I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnsharpMaskImage() sharpens one or
 more image channels.  We convolve the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
%  The format of the UnsharpMaskImage method is:
%
%      Image *UnsharpMaskImage(const Image *image,const double radius,
%        const double sigma,const double gain,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o gain: the percentage of the difference between the original and the
%      blur image that is added back into the original.
%
%    o threshold: the threshold in pixels needed to apply the difference gain.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
  const double sigma,const double gain,const double threshold,
  ExceptionInfo *exception)
{
#define SharpenImageTag  "Sharpen/Image"

  CacheView
    *image_view,
    *unsharp_view;

  Image
    *unsharp_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    quantum_threshold;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* NOTE(review): unlike sibling methods, exception->signature is not
     asserted here -- confirm whether that is intentional. */
  assert(exception != (ExceptionInfo *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the accelerated (OpenCL) path first; fall through on failure. */
  unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold,
    exception);
  if (unsharp_image != (Image *) NULL)
    return(unsharp_image);
#endif
  unsharp_image=BlurImage(image,radius,sigma,exception);
  if (unsharp_image == (Image *) NULL)
    return((Image *) NULL);
  quantum_threshold=(double) QuantumRange*threshold;
  /*
    Unsharp-mask image: add back gain*(original-blur) where the difference
    exceeds the threshold.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,unsharp_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits,
          unsharp_traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        unsharp_traits=GetPixelChannelTraits(unsharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (unsharp_traits == UndefinedPixelTrait))
          continue;
        /* Copy-only channels and masked pixels pass through unchanged. */
        if (((unsharp_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) == 0))
          {
            SetPixelChannel(unsharp_image,channel,p[i],q);
            continue;
          }
        /* q currently holds the blurred value; pixel = original - blur. */
        pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q);
        if (fabs(2.0*pixel) < quantum_threshold)
          pixel=(double) p[i];
        else
          pixel=(double) p[i]+gain*pixel;
        SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(unsharp_image);
    }
    if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_UnsharpMaskImage)
#endif
        proceed=SetImageProgress(image,SharpenImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  unsharp_image->type=image->type;
  unsharp_view=DestroyCacheView(unsharp_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    unsharp_image=DestroyImage(unsharp_image);
  return(unsharp_image);
}
openmp-ex16.c
/* This example for computing pi is adapted from Hager & Wellein, Listing 6.2. * * We compute $\pi = \int_0^1 \frac{4}{1 + x^2} dx$. * * In this example we use two-point Gaussian quadrature rule. */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <unistd.h> #include <tictoc.h> int main(int argc, char **argv) { int i, j, M = 10, N = 10; double h, phi[2], relerr, pi = M_PI; double mintime = 0., maxtime = 0., avgtime = 0.; TicTocTimer timer; /* if this program is run with an argument, take the first argument to be N */ if (argc > 1) { N = atoi(argv[1]); } /* h is the width of each interval */ h = 1. / N; /* compute the quadrature points and weights */ phi[0] = (-sqrt(1./3.) + 1.) / 2.; phi[1] = ( sqrt(1./3.) + 1.) / 2.; for (j = 0; j < M; j++) { double time; pi = 0.; timer = tic(); #pragma omp parallel for reduction(+:pi) for (i = 0; i < N; i++) { int k; for (k = 0; k < 2; k++) { double x = h * (i + phi[k]); /* let's pretend this is a lengthier calculation */ usleep(1); pi += h * 0.5 * 4. / (1. + x*x); } } time = toc(&timer); if (j == 1) { mintime = maxtime = avgtime = time; } else if (j > 1) { mintime = time < mintime ? time : mintime; maxtime = time > maxtime ? time : maxtime; avgtime += time; } } avgtime /= (M - 1); relerr = fabs(M_PI - pi) / M_PI; printf("Computed pi %g, relative error %g\n", pi, relerr); printf("Calculation time %g [%g, %g]\n", avgtime, mintime, maxtime); return 0; }
openmp_frequency.c
/* compile: gcc -fopenmp -Wall -std=c99 openmp_frequency.c -lrt -lm -O3 -o * openmp_frequency */ /* usage ./openmp_frequency [y|n|p] [no of tests] [work factor per test] */ /* performs busywork many times, prints how long iterations take */ /* with y, uses OpenMP */ /* with p, uses pthreads + pthread_barrier_wait() */ /* with n, runs a single thread */ /* Typical test session looks like: */ /* gcc -fopenmp -Wall -std=c99 openmp_frequency.c -lrt -lm -O3 -o * openmp_frequency */ /* ./openmp_frequency n 1000000 100 */ /* ./openmp_frequency y 1000000 100 */ /* ./openmp_frequency p 1000000 100 */ /* Busywork done here isn't enough to stress CPU, so OpenMP is expected */ /* to be slower than non-OpenMP. What I'm trying to measure here is how */ /* much overhead it actually has per parallel-for start-and-stop. */ /* Amusingly, OpenMP with gcc 4.6.3 on my machine seems to go faster than */ /* the manual pthread_barrier_wait() version. I should go find out why. */ #define THREADS 8 #define _GNU_SOURCE #include <time.h> #include <stdio.h> #include <stdlib.h> #include <errno.h> #include <string.h> #include <stdint.h> #include <math.h> #include <pthread.h> double clock_mono_us() { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); return 1.0e6 * (ts.tv_sec + (ts.tv_nsec * 1e-9)); } void run_openmp_test(long runs, long iters, double *times, uint64_t *gbg) { printf("OpenMP test: %ld runs of %ld iterations.\n", runs, iters); double t0 = clock_mono_us(); for (long run = 0; run < runs; run++) { #pragma omp parallel for for (long iter = 0; iter < iters; iter++) { gbg[iter]++; } double t1 = clock_mono_us(); times[run] = t1 - t0; t0 = t1; } } void run_single_test(long runs, long iters, double *times, uint64_t *gbg) { printf("Single test: %ld runs of %ld iterations.\n", runs, iters); double t0 = clock_mono_us(); for (long run = 0; run < runs; run++) { for (long iter = 0; iter < iters; iter++) { gbg[iter]++; } double t1 = clock_mono_us(); times[run] = t1 - t0; t0 = t1; } } struct 
context { long runs; long iters; uint64_t *gbg; double *times; int id; pthread_barrier_t *barr_p; }; struct context contexts[THREADS]; void *run_pthread_inner(void *context_v) { struct context *context = context_v; long runs = context->runs; long iters = context->iters; long iter_share = iters / THREADS; int id = context->id; long iter_from = iter_share * id; long iter_to = iter_from + iter_share; double *times = context->times; if (context->id == (THREADS - 1)) { iter_to = iters; } uint64_t *gbg = context->gbg; pthread_barrier_t *barr_p = context->barr_p; pthread_barrier_wait(barr_p); double t0 = 0; if (id == 0) { t0 = clock_mono_us(); } for (long run = 0; run < runs; run++) { for (long iter = iter_from; iter < iter_to; iter++) { gbg[iter]++; } if (id == 0) { double t1 = clock_mono_us(); times[run] = t1 - t0; t0 = t1; } pthread_barrier_wait(barr_p); } return NULL; } void run_pthread_test(long runs, long iters, double *times, uint64_t *gbg) { printf("Thread test: %ld runs of %ld iterations.\n", runs, iters); pthread_barrier_t barr; pthread_barrier_init(&barr, NULL, THREADS); pthread_t threads[THREADS]; for (int i = 0; i < THREADS; i++) { contexts[i].runs = runs; contexts[i].iters = iters; contexts[i].times = times; contexts[i].gbg = gbg; contexts[i].id = i; contexts[i].barr_p = &barr; pthread_create(threads + i, NULL, run_pthread_inner, contexts + i); if (errno) { abort(); } /* meh, error handling */ } for (int i = 0; i < THREADS; i++) { pthread_join(threads[i], NULL); if (errno) { abort(); } /* meh, error handling */ } } #define ALLOC_WITHOUT_OVERFLOW(p, n) \ do { \ size_t to_alloc = sizeof(*(p)) * (n); \ if ((to_alloc / (n)) != sizeof(*(p))) { \ p = NULL; \ } else { \ p = malloc(to_alloc); \ } \ } while (0) int run_test(long runs, long iters, int with_loop) { double *run_times; ALLOC_WITHOUT_OVERFLOW(run_times, runs); uint64_t *iter_bits; ALLOC_WITHOUT_OVERFLOW(iter_bits, iters); if (run_times == NULL) { printf("Couldn't alloc %ld doubles.\n", runs); return 1; } 
if (iter_bits == NULL) { printf("Couldn't alloc %ld uint64_ts.\n", iters); return 1; } for (long run = 0; run < runs; run++) { run_times[run] = 0.0; } for (long iter = 0; iter < iters; iter++) { iter_bits[iter] = iter & 0xFFFF; } switch (with_loop) { case 0: run_single_test(runs, iters, run_times, iter_bits); break; case 1: run_openmp_test(runs, iters, run_times, iter_bits); break; case 2: run_pthread_test(runs, iters, run_times, iter_bits); break; } uint64_t ac = 0; for (long iter = 0; iter < iters; iter++) { ac += iter_bits[iter]; } double sum = 0; for (long run = 0; run < runs; run++) { sum += run_times[run]; } double mean = sum / (double)runs; double sum_squared_diff = 0; for (long run = 0; run < runs; run++) { double diff = run_times[run] - mean; sum_squared_diff += (diff * diff); } double variance = sum_squared_diff / (double)runs; double std_dev = sqrt(variance); double top = 0; for (long run = 0; run < runs; run++) { if (run_times[run] > top) { top = run_times[run]; } } double bot = top; for (long run = 0; run < runs; run++) { if (run_times[run] < bot) { bot = run_times[run]; } } printf("Garbage sum: %lu.\n", (unsigned long)ac); printf("(garbage should be equal between different loop types)\n"); printf("times: mean %fus, std_dev %fus.\n", mean, std_dev); printf("times: min %fus, max %fus.\n", bot, top); long howmany, prev_howmany = 0; double threshold = 0, each_diff = top / 25.0; do { howmany = 0; for (long run = 0; run < runs; run++) { if (run_times[run] >= threshold) { howmany++; } } if ((howmany > 1) && (prev_howmany != howmany)) { printf("runs: %9ld took >=%fus\n", howmany, threshold); } threshold += each_diff; prev_howmany = howmany; } while (howmany > 1); printf("runs: %9ld took ==%fus\n", 1L, top); free(run_times); run_times = NULL; free(iter_bits); iter_bits = NULL; return 0; } int main(int argc, char **argv) { long runs = 5; long iters = 1024; int with_loop = 0; int fail = 0; if (argc > 4) { printf("%d is too many arguments.\n", argc - 1); fail = 
1; } if (argc > 3) { iters = strtol(argv[3], NULL, 10); if ((errno) || (iters <= 0)) { printf("%s is not an in-range integer.\n", argv[3]); fail = 1; } } if (argc > 2) { runs = strtol(argv[2], NULL, 10); if ((errno) || (runs <= 0)) { printf("%s is not an in-range integer.\n", argv[2]); fail = 1; } } if (argc > 1) { if (strcmp(argv[1], "p") == 0) { with_loop = 2; } else if (strcmp(argv[1], "y") == 0) { with_loop = 1; } else if (strcmp(argv[1], "n") == 0) { with_loop = 0; } else { printf("%s is not in 'y', 'n', 'p'.\n", argv[1]); fail = 1; } } if (fail) { fprintf(stderr, "Usage: openmp_frequency [y|n] [5] [1024]\n"); return 1; } return run_test(runs, iters, with_loop); }
hello-omp.c
/*
 * Copyright (c) 2020 Martin Storsjo
 *
 * This file is part of llvm-mingw.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <stdio.h>
#include <omp.h>

/* OpenMP smoke test: every thread in the default team announces itself. */
int main(int argc, char *argv[]) {
#pragma omp parallel
    {
        const int tid = omp_get_thread_num();
        const int team_size = omp_get_num_threads();
        printf("thread %d, nthreads %d\n", tid, team_size);
    }
    return 0;
}
compatibility.h
// -*- C++ -*- // Copyright (C) 2007, 2008, 2009 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the terms // of the GNU General Public License as published by the Free Software // Foundation; either version 3, or (at your option) any later // version. // This library is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. /** @file parallel/compatibility.h * @brief Compatibility layer, mostly concerned with atomic operations. * This file is a GNU parallel extension to the Standard C++ Library. */ // Written by Felix Putze. #ifndef _GLIBCXX_PARALLEL_COMPATIBILITY_H #define _GLIBCXX_PARALLEL_COMPATIBILITY_H 1 #include <parallel/types.h> #include <parallel/base.h> #if defined(__SUNPRO_CC) && defined(__sparc) #include <sys/atomic.h> #endif #if !defined(_WIN32) || defined (__CYGWIN__) #include <sched.h> #endif #if defined(_MSC_VER) #include <Windows.h> #include <intrin.h> #undef max #undef min #endif #ifdef __MINGW32__ // Including <windows.h> will drag in all the windows32 names. Since // that can cause user code portability problems, we just declare the // one needed function here. 
extern "C" __attribute((dllimport)) void __attribute__((stdcall)) Sleep (unsigned long); #endif namespace __gnu_parallel { #if defined(__ICC) template<typename must_be_int = int> int32 faa32(int32* x, int32 inc) { asm volatile("lock xadd %0,%1" : "=r" (inc), "=m" (*x) : "0" (inc) : "memory"); return inc; } #if defined(__x86_64) template<typename must_be_int = int> int64 faa64(int64* x, int64 inc) { asm volatile("lock xadd %0,%1" : "=r" (inc), "=m" (*x) : "0" (inc) : "memory"); return inc; } #endif #endif // atomic functions only work on integers /** @brief Add a value to a variable, atomically. * * Implementation is heavily platform-dependent. * @param ptr Pointer to a 32-bit signed integer. * @param addend Value to add. */ inline int32 fetch_and_add_32(volatile int32* ptr, int32 addend) { #if defined(__ICC) //x86 version return _InterlockedExchangeAdd((void*)ptr, addend); #elif defined(__ECC) //IA-64 version return _InterlockedExchangeAdd((void*)ptr, addend); #elif defined(__ICL) || defined(_MSC_VER) return _InterlockedExchangeAdd(reinterpret_cast<volatile long*>(ptr), addend); #elif defined(__GNUC__) return __sync_fetch_and_add(ptr, addend); #elif defined(__SUNPRO_CC) && defined(__sparc) volatile int32 before, after; do { before = *ptr; after = before + addend; } while (atomic_cas_32((volatile unsigned int*)ptr, before, after) != before); return before; #else //fallback, slow #pragma message("slow fetch_and_add_32") int32 res; #pragma omp critical { res = *ptr; *(ptr) += addend; } return res; #endif } /** @brief Add a value to a variable, atomically. * * Implementation is heavily platform-dependent. * @param ptr Pointer to a 64-bit signed integer. * @param addend Value to add. 
*/ inline int64 fetch_and_add_64(volatile int64* ptr, int64 addend) { #if defined(__ICC) && defined(__x86_64) //x86 version return faa64<int>((int64*)ptr, addend); #elif defined(__ECC) //IA-64 version return _InterlockedExchangeAdd64((void*)ptr, addend); #elif defined(__ICL) || defined(_MSC_VER) #ifndef _WIN64 _GLIBCXX_PARALLEL_ASSERT(false); //not available in this case return 0; #else return _InterlockedExchangeAdd64(ptr, addend); #endif #elif defined(__GNUC__) && defined(__x86_64) return __sync_fetch_and_add(ptr, addend); #elif defined(__GNUC__) && defined(__i386) && \ (defined(__i686) || defined(__pentium4) || defined(__athlon)) return __sync_fetch_and_add(ptr, addend); #elif defined(__SUNPRO_CC) && defined(__sparc) volatile int64 before, after; do { before = *ptr; after = before + addend; } while (atomic_cas_64((volatile unsigned long long*)ptr, before, after) != before); return before; #else //fallback, slow #if defined(__GNUC__) && defined(__i386) // XXX doesn't work with -march=native //#warning "please compile with -march=i686 or better" #endif #pragma message("slow fetch_and_add_64") int64 res; #pragma omp critical { res = *ptr; *(ptr) += addend; } return res; #endif } /** @brief Add a value to a variable, atomically. * * Implementation is heavily platform-dependent. * @param ptr Pointer to a signed integer. * @param addend Value to add. 
*/ template<typename T> inline T fetch_and_add(volatile T* ptr, T addend) { if (sizeof(T) == sizeof(int32)) return (T)fetch_and_add_32((volatile int32*) ptr, (int32)addend); else if (sizeof(T) == sizeof(int64)) return (T)fetch_and_add_64((volatile int64*) ptr, (int64)addend); else _GLIBCXX_PARALLEL_ASSERT(false); } #if defined(__ICC) template<typename must_be_int = int> inline int32 cas32(volatile int32* ptr, int32 old, int32 nw) { int32 before; __asm__ __volatile__("lock; cmpxchgl %1,%2" : "=a"(before) : "q"(nw), "m"(*(volatile long long*)(ptr)), "0"(old) : "memory"); return before; } #if defined(__x86_64) template<typename must_be_int = int> inline int64 cas64(volatile int64 *ptr, int64 old, int64 nw) { int64 before; __asm__ __volatile__("lock; cmpxchgq %1,%2" : "=a"(before) : "q"(nw), "m"(*(volatile long long*)(ptr)), "0"(old) : "memory"); return before; } #endif #endif /** @brief Compare @c *ptr and @c comparand. If equal, let @c * *ptr=replacement and return @c true, return @c false otherwise. * * Implementation is heavily platform-dependent. * @param ptr Pointer to 32-bit signed integer. * @param comparand Compare value. * @param replacement Replacement value. 
*/ inline bool compare_and_swap_32(volatile int32* ptr, int32 comparand, int32 replacement) { #if defined(__ICC) //x86 version return _InterlockedCompareExchange((void*)ptr, replacement, comparand) == comparand; #elif defined(__ECC) //IA-64 version return _InterlockedCompareExchange((void*)ptr, replacement, comparand) == comparand; #elif defined(__ICL) || defined(_MSC_VER) return _InterlockedCompareExchange(reinterpret_cast<volatile long*>(ptr), replacement, comparand) == comparand; #elif defined(__GNUC__) return __sync_bool_compare_and_swap(ptr, comparand, replacement); #elif defined(__SUNPRO_CC) && defined(__sparc) return atomic_cas_32((volatile unsigned int*)ptr, comparand, replacement) == comparand; #else #pragma message("slow compare_and_swap_32") bool res = false; #pragma omp critical { if (*ptr == comparand) { *ptr = replacement; res = true; } } return res; #endif } /** @brief Compare @c *ptr and @c comparand. If equal, let @c * *ptr=replacement and return @c true, return @c false otherwise. * * Implementation is heavily platform-dependent. * @param ptr Pointer to 64-bit signed integer. * @param comparand Compare value. * @param replacement Replacement value. 
*/ inline bool compare_and_swap_64(volatile int64* ptr, int64 comparand, int64 replacement) { #if defined(__ICC) && defined(__x86_64) //x86 version return cas64<int>(ptr, comparand, replacement) == comparand; #elif defined(__ECC) //IA-64 version return _InterlockedCompareExchange64((void*)ptr, replacement, comparand) == comparand; #elif defined(__ICL) || defined(_MSC_VER) #ifndef _WIN64 _GLIBCXX_PARALLEL_ASSERT(false); //not available in this case return 0; #else return _InterlockedCompareExchange64(ptr, replacement, comparand) == comparand; #endif #elif defined(__GNUC__) && defined(__x86_64) return __sync_bool_compare_and_swap(ptr, comparand, replacement); #elif defined(__GNUC__) && defined(__i386) && \ (defined(__i686) || defined(__pentium4) || defined(__athlon)) return __sync_bool_compare_and_swap(ptr, comparand, replacement); #elif defined(__SUNPRO_CC) && defined(__sparc) return atomic_cas_64((volatile unsigned long long*)ptr, comparand, replacement) == comparand; #else #if defined(__GNUC__) && defined(__i386) // XXX -march=native //#warning "please compile with -march=i686 or better" #endif #pragma message("slow compare_and_swap_64") bool res = false; #pragma omp critical { if (*ptr == comparand) { *ptr = replacement; res = true; } } return res; #endif } /** @brief Compare @c *ptr and @c comparand. If equal, let @c * *ptr=replacement and return @c true, return @c false otherwise. * * Implementation is heavily platform-dependent. * @param ptr Pointer to signed integer. * @param comparand Compare value. * @param replacement Replacement value. 
*/ template<typename T> inline bool compare_and_swap(volatile T* ptr, T comparand, T replacement) { if (sizeof(T) == sizeof(int32)) return compare_and_swap_32((volatile int32*) ptr, (int32)comparand, (int32)replacement); else if (sizeof(T) == sizeof(int64)) return compare_and_swap_64((volatile int64*) ptr, (int64)comparand, (int64)replacement); else _GLIBCXX_PARALLEL_ASSERT(false); } /** @brief Yield the control to another thread, without waiting for the end to the time slice. */ inline void yield() { #if defined (_WIN32) && !defined (__CYGWIN__) Sleep(0); #else sched_yield(); #endif } } // end namespace #endif /* _GLIBCXX_PARALLEL_COMPATIBILITY_H */
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 8; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,4);t1++) { lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8)); ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-1,2)),ceild(8*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(4*t1+Ny+5,8)),floord(8*t2+Ny+4,8)),floord(8*t1-8*t2+Nz+Ny+3,8));t3++) { for (t4=max(max(max(0,ceild(t1-63,64)),ceild(8*t2-Nz-252,256)),ceild(8*t3-Ny-252,256));t4<=min(min(min(min(floord(Nt+Nx-4,256),floord(4*t1+Nx+5,256)),floord(8*t2+Nx+4,256)),floord(8*t3+Nx+4,256)),floord(8*t1-8*t2+Nz+Nx+3,256));t4++) { for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),8*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),8*t3+6),256*t4+254),8*t1-8*t2+Nz+5);t5++) { for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(256*t4,t5+1); ubv=min(256*t4+255,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } } } /* End of CLooG code 
*/ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
examine.c
#include <stdlib.h> #include <stdio.h> #include <omp.h> #include <time.h> #include <string.h> #include <time.h> #define UPLIMIT 30 #define DOWNLIMIT 12 int main(int argc, char *argv[]) { FILE *fp; long size; const char temp[2] = " "; double temp2; long long count=0; size_t result; char *buffer; struct timespec start, end,middle1,middle2; const char *filename; filename = argv[1]; fp = fopen(filename,"r"); fseek(fp,0,SEEK_END); size = ftell(fp); rewind(fp); buffer = (char*) malloc (sizeof(char)*size); result = fread(buffer,1,size,fp); long long i; long long j=0; long long count2=0; int threads = atoi(argv[2]); printf("%d\n",threads); omp_set_num_threads(threads); int b; char *token; char **buffer2; int *mikos; clock_gettime(CLOCK_MONOTONIC, &start); for(i=0;i<result;i++) { if(buffer[i]=='\n') { count2++; } } buffer2 = malloc (sizeof(char*)*count2); for(i=0;i<count2;i++) { buffer2[i] = malloc (sizeof(char)*31); } int row=0; j=0; for(i=0;i<result;i++) { if(buffer[i]=='\n') { row++; j=0; } else { buffer2[row][j]=buffer[i]; j++; } } for(i=0;i<count2;i++) { b=0; token=strsep(&buffer2[i],temp); while(token!=NULL) { sscanf(token,"%lf",&temp2); if(temp2>DOWNLIMIT && temp2<UPLIMIT) { b++; } token=strsep(&buffer2[i],temp); } if(b==3) { count++; } } #pragma omp master clock_gettime (CLOCK_MONOTONIC, &end); } const int DAS_NANO_SECONDS_IN_SEC = 1000000000; long timeElapsed_s = end.tv_sec -start.tv_sec; long timeElapsed_n = end.tv_nsec-start.tv_nsec; if ( timeElapsed_n < 0 ) {timeElapsed_n = DAS_NANO_SECONDS_IN_SEC + timeElapsed_n; timeElapsed_s--;} printf("Time: %ld.%09ld secs \n",timeElapsed_s,timeElapsed_n); printf("%lld",count); return 0; }
openmp-util.h
/* * OpenMP multithreading setup util for VMD tcl plugins. * * Copyright (c) 2006-2009 akohlmey@cmm.chem.upenn.edu */ #ifndef OPENMP_UTIL_H #define OPENMP_UTIL_H #include <tcl.h> #if defined(_OPENMP) #include <stdio.h> #include <stdlib.h> #include <omp.h> static int nthr = 0; #endif /* this results in an empty function without OpenMP */ static void check_thread_count(Tcl_Interp *interp, const char *name) { #if defined(_OPENMP) char *forcecount, buffer[256]; int newcpucount = nthr; /* Handle VMD's way to allow the user to override the number * of CPUs for use in scalability testing, debugging, etc. */ forcecount = getenv("VMDFORCECPUCOUNT"); if (forcecount != NULL) { if (sscanf(forcecount, "%d", &newcpucount) != 1) { newcpucount=1; } omp_set_num_threads(newcpucount); } /* first time setup */ if (newcpucount < 1) { #pragma omp parallel shared(nthr) { #pragma omp master { newcpucount = omp_get_num_threads(); } } } /* print a message to the console, whenever the number of threads changes. */ if (nthr!=newcpucount) { nthr=newcpucount; sprintf(buffer,"vmdcon -info \"'%s' will use %d thread%s through OpenMP.\"\n", name, nthr, (nthr>1)? "s":""); Tcl_Eval(interp,buffer); } #endif } #endif /* OPENMP_UTIL_H */
kmp_set_dispatch_buf.c
// RUN: %libomp-compile && %libomp-run 7 // RUN: %libomp-run 0 && %libomp-run -1 // RUN: %libomp-run 1 && %libomp-run 2 && %libomp-run 5 // RUN: %libomp-compile -DMY_SCHEDULE=guided && %libomp-run 7 // RUN: %libomp-run 1 && %libomp-run 2 && %libomp-run 5 // UNSUPPORTED: clang-11, clang-12 #include <stdio.h> #include <omp.h> #include <stdlib.h> #include <limits.h> #include "omp_testsuite.h" #define INCR 7 #define MY_MAX 200 #define MY_MIN -200 #ifndef MY_SCHEDULE # define MY_SCHEDULE dynamic #endif int num_disp_buffers, num_loops; int a, b, a_known_value, b_known_value; int test_kmp_set_disp_num_buffers() { int success = 1; a = 0; b = 0; // run many small dynamic loops to stress the dispatch buffer system #pragma omp parallel { int i,j; for (j = 0; j < num_loops; j++) { #pragma omp for schedule(MY_SCHEDULE) nowait for (i = MY_MIN; i < MY_MAX; i+=INCR) { #pragma omp atomic a++; } #pragma omp for schedule(MY_SCHEDULE) nowait for (i = MY_MAX; i >= MY_MIN; i-=INCR) { #pragma omp atomic b++; } } } // detect failure if (a != a_known_value || b != b_known_value) { success = 0; printf("a = %d (should be %d), b = %d (should be %d)\n", a, a_known_value, b, b_known_value); } return success; } int main(int argc, char** argv) { int i,j; int num_failed=0; if (argc != 2) { fprintf(stderr, "usage: %s num_disp_buffers\n", argv[0]); exit(1); } // set the number of dispatch buffers num_disp_buffers = atoi(argv[1]); kmp_set_disp_num_buffers(num_disp_buffers); // figure out the known values to compare with calculated result a_known_value = 0; b_known_value = 0; // if specified to use bad num_disp_buffers set num_loops // to something reasonable if (num_disp_buffers <= 0) num_loops = 10; else num_loops = num_disp_buffers*10; for (j = 0; j < num_loops; j++) { for (i = MY_MIN; i < MY_MAX; i+=INCR) a_known_value++; for (i = MY_MAX; i >= MY_MIN; i-=INCR) b_known_value++; } for(i = 0; i < REPETITIONS; i++) { if(!test_kmp_set_disp_num_buffers()) { num_failed++; } } return num_failed; }
OmpForBodyLink.c
int main() { int i; #pragma omp for for (i = 0; i < 10; i++) { int x; } #pragma omp for for (i = 0; i < 10; i++) { int y; } }
fine_grain.c
// OpenMP library header #include <omp.h> // Standard IO libraries #include <stdio.h> #include <stdlib.h> // Math library #include <math.h> int main(int argc, char* argv[]) { int const n = pow(2, 10) + 1; int i, thread_ID, num_threads; double x[n], y[n]; double norm, true_x_norm, y_norm; // Handle setting the number of threads num_threads = 1; #ifdef _OPENMP num_threads = 8; omp_set_num_threads(num_threads); printf("Using OpenMP with %d threads.\n", num_threads); #endif // Initialize x vector #pragma parallel for for (i = 0; i < n; ++i) x[i] = (double)i; norm = 0.0; y_norm = 0.0; #pragma omp parallel { #pragma omp for reduction(+ : norm) for (i = 0; i < n; ++i) norm = norm + fabs(x[i]); #pragma omp barrier // Not srtictly needed #pragma omp for reduction(+ : y_norm) for (i = 0; i < n; ++i) { y[i] = x[i] / norm; y_norm = y_norm + fabs(y[i]); } } true_x_norm = n * (n - 1) / 2; printf("Norm of x = %f, n (n-1) / 2 = %f.\n", norm, true_x_norm); printf("Norm of y should be 1, is %f.\n", y_norm); return 0; }
dropout_op.h
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <cstring> #include <random> #include <string> #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; template <typename T, int MajorType = Eigen::RowMajor, typename IndexType = Eigen::DenseIndex> using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>; template <typename DeviceContext, typename T> class CPUDropoutKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.Input<Tensor>("X"); auto* seed = context.HasInput("Seed") ? 
context.Input<Tensor>("Seed") : nullptr; auto* y = context.Output<Tensor>("Out"); const auto* x_data = x->data<T>(); auto* y_data = y->mutable_data<T>(context.GetPlace()); float dropout_prob = context.Attr<float>("dropout_prob"); auto& dropout_implementation = context.Attr<std::string>("dropout_implementation"); bool upscale_in_train = (dropout_implementation == "upscale_in_train"); if (!context.Attr<bool>("is_test")) { auto* mask = context.Output<Tensor>("Mask"); auto* mask_data = mask->mutable_data<uint8_t>(context.GetPlace()); size_t size = framework::product(mask->dims()); // Special case when dropout_prob is 1.0 if (dropout_prob == 1.0f) { std::memset(y_data, 0, size * sizeof(*y_data)); // NOLINT std::memset(mask_data, 0, size * sizeof(*mask_data)); // NOLINT return; } // NOTE: fixed seed should only be used in unittest or for debug. // Guarantee to use random seed in training. std::random_device rnd; std::minstd_rand engine; int seed_data; if (seed) { seed_data = *(seed->data<int>()); } else { seed_data = context.Attr<bool>("fix_seed") ? 
context.Attr<int>("seed") : rnd(); } engine.seed(seed_data); std::uniform_real_distribution<float> dist(0, 1); for (size_t i = 0; i < size; ++i) { if (dist(engine) < dropout_prob) { mask_data[i] = 0; y_data[i] = 0; } else { mask_data[i] = 1; if (upscale_in_train) { y_data[i] = x_data[i] / static_cast<T>(1.0f - dropout_prob); } else { y_data[i] = x_data[i]; } } } } else { if (upscale_in_train) { const auto* X_data = x->data<T>(); auto* Y_data = y->mutable_data<T>(context.GetPlace()); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int i = 0; i < x->numel(); i++) { Y_data[i] = X_data[i]; } } else { auto X = EigenMatrix<T>::Reshape(*x, 1); auto Y = EigenMatrix<T>::Reshape(*y, 1); auto& place = *context.template device_context<DeviceContext>().eigen_device(); Y.device(place) = X * static_cast<T>(1.0f - dropout_prob); } } } }; template <typename DeviceContext, typename T> class DropoutGradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE(!context.Attr<bool>("is_test"), "GradOp is only callable when is_test is false"); auto* grad_x = context.Output<Tensor>(framework::GradVarName("X")); auto* grad_y = context.Input<Tensor>(framework::GradVarName("Out")); auto* mask = context.Input<Tensor>("Mask"); grad_x->mutable_data<T>(context.GetPlace()); auto M = EigenMatrix<uint8_t>::Reshape(*mask, 1); auto dX = EigenMatrix<T>::Reshape(*grad_x, 1); auto dY = EigenMatrix<T>::Reshape(*grad_y, 1); auto& place = *context.template device_context<DeviceContext>().eigen_device(); auto& dropout_implementation = context.Attr<std::string>("dropout_implementation"); if (dropout_implementation == "upscale_in_train") { float dropout_prob = context.Attr<float>("dropout_prob"); if (dropout_prob == 1.0f) { dX.device(place) = static_cast<T>(0) * dY; } else { dX.device(place) = dY * M.cast<T>() / static_cast<T>(1.0f - dropout_prob); } } else { dX.device(place) = dY * M.cast<T>(); } } }; } // namespace 
operators } // namespace paddle
batchnorm_arm_func.h
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef BATCHNORM_OP

#pragma once

#include <cmath>
#include "operators/op_param.h"
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#include <arm_neon.h>
#endif  // __ARM_NEON__

namespace paddle_mobile {
namespace operators {

// Inference-time batch normalization over a 4-D float tensor.
// Per channel c, the running statistics are folded into a single affine
// transform  y = scale * x + bias  with
//   scale = gamma / sqrt(var + eps)
//   bias  = beta - scale * mean
// so the inner per-element loop is one multiply-add.
template <typename P>
void BatchnormCompute(const BatchNormParam<CPU> &param) {
  const float epsilon = param.Epsilon();
  const float *mean_ptr = param.InputMean()->data<float>();
  const float *variance_ptr = param.InputVariance()->data<float>();
  const float *scale_ptr = param.InputScale()->data<float>();
  const float *bias_ptr = param.InputBias()->data<float>();

  const framework::Tensor *input = param.InputX();
  const float *input_ptr = input->data<float>();
  framework::Tensor *output = param.OutputY();
  float *output_ptr = output->mutable_data<float>();
  // dims()[2] * dims()[3] elements per (batch, channel) plane; indexing
  // below assumes NCHW layout (dims = [N, C, H, W]) -- TODO confirm
  // against the op's registration.
  size_t spatial_size = output->dims()[2] * output->dims()[3];
  int channels = output->dims()[1];

  // Each (batch, channel) plane is independent, so the two outer loops
  // are collapsed into one parallel iteration space.
#pragma omp parallel for collapse(2)
  for (int batch = 0; batch < output->dims()[0]; ++batch) {
    for (int c = 0; c < channels; ++c) {
      // Fold statistics into the per-channel affine coefficients.
      float inv_scale = 1.f / (std::sqrt(variance_ptr[c] + epsilon));
      float bias = bias_ptr[c] - inv_scale * scale_ptr[c] * mean_ptr[c];
      float scale = inv_scale * scale_ptr[c];
      size_t offset = (batch * channels + c) * spatial_size;
      const float *x = input_ptr + offset;
      float *y = output_ptr + offset;
      size_t remain = spatial_size;
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
      // NEON fast path: 16 floats per iteration (four 4-lane vmla ops);
      // the scalar loop below then handles spatial_size % 16 elements.
      int loop = spatial_size >> 4;
      remain = spatial_size & 0xF;
      float32x4_t __scale = vdupq_n_f32(scale);
      float32x4_t __bias = vdupq_n_f32(bias);
      for (int k = 0; k < loop; ++k, x += 16, y += 16) {
        float32x4_t r0 = vld1q_f32(x);
        float32x4_t r1 = vld1q_f32(x + 4);
        float32x4_t r2 = vld1q_f32(x + 8);
        float32x4_t r3 = vld1q_f32(x + 12);
        r0 = vmlaq_f32(__bias, __scale, r0);
        r1 = vmlaq_f32(__bias, __scale, r1);
        r2 = vmlaq_f32(__bias, __scale, r2);
        r3 = vmlaq_f32(__bias, __scale, r3);
        vst1q_f32(y, r0);
        vst1q_f32(y + 4, r1);
        vst1q_f32(y + 8, r2);
        vst1q_f32(y + 12, r3);
      }
#endif  // __ARM_NEON__
      // Scalar tail (or the whole plane when NEON is unavailable).
      for (int k = 0; k < remain; ++k) {
        y[k] = scale * x[k] + bias;
      }
    }
  }
}

}  // namespace operators
}  // namespace paddle_mobile

#endif
GB_unaryop__lnot_uint64_bool.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__lnot_uint64_bool
// op(A') function: GB_tran__lnot_uint64_bool

// C type:  uint64_t
// A type:  bool
// cast:    uint64_t cij = (uint64_t) aij
// unaryop: cij = !(aij != 0)

// A's element type, as stored in Ax
#define GB_ATYPE \
    bool

// C's element type, as stored in Cx
#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (logical not)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting from the A type to the C type
#define GB_CASTING(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (cast (aij)): load aij, typecast it, then apply the unary op
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    /* aij = Ax [pA] */                     \
    GB_GETA (aij, Ax, pA) ;                 \
    /* Cx [pC] = op (cast (aij)) */         \
    GB_CASTING (z, aij) ;                   \
    GB_OP (GB_CX (pC), z) ;                 \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_uint64_bool
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    bool *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // p is the parallel-for loop index; OpenMP makes it private per thread
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_uint64_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop body is supplied by the included template, specialized
    // through the GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
shape.h
/* * shape.h * * Created on: Dec 28, 2015 * Author: agibsonccc */ #ifndef SHAPE_H_ #define SHAPE_H_ #include <cstring> #include <cstdio> #include "../dll.h" #include "../nd4jmalloc.h" #include "../templatemath.h" #include "../helpers/logger.h" #include "../pointercast.h" #include "../cnpy/cnpy.h" #include <op_boilerplate.h> #define MAX_DIMENSION 0x7fffffff #define MAX_NUM_THREADS 1024 #define MAX_RANK 32 #define MAX_COORD 3 #define PREALLOC_SIZE 33554432 #ifdef __CUDACC__ #include <cuda.h> #include <cuda_runtime.h> #include <helpers/sharedmem.h> #endif #ifdef __CUDACC__ #define INLINEDEF inline #else #define INLINEDEF inline #endif #include "../pairwise_util.h" #include <stdint.h> #include <array/ArrayOptions.h> namespace shape { /** * Shape information approximating * the information on an ndarray */ struct ND4J_EXPORT ShapeInformation { _CUDA_HD ShapeInformation(Nd4jLong *shape_ = nullptr, Nd4jLong *stride_ = nullptr, char order_ = 0, int rank_ = 0, int offset_ = 0, int elementWiseStride_ = 0) : shape(shape_), stride(stride_), order(order_), rank(rank_), offset(offset_), elementWiseStride(elementWiseStride_) {} Nd4jLong *shape; Nd4jLong *stride; char order; int rank; int offset; int elementWiseStride; }; /** * Indexing information * for bounds checking */ struct ND4J_EXPORT CurrentIndexing { int numElementsPerThread; int blockStartingIndex; int startingThreadIndex; int endingThreadIndex; }; ND4J_EXPORT _CUDA_HD bool shapeEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2); ND4J_EXPORT _CUDA_HD Nd4jLong* detachShape(Nd4jLong *originalShape); ND4J_EXPORT _CUDA_HD Nd4jLong* copyShape(Nd4jLong *originalShape); ND4J_EXPORT _CUDA_HD bool shapeEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2); ND4J_EXPORT _CUDA_HD bool strideEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2); ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2); ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *stride1,int 
rank1,Nd4jLong *stride2,int rank2); ND4J_EXPORT _CUDA_HD bool equalsSoft(Nd4jLong *shapeA, Nd4jLong *shapeB); ND4J_EXPORT _CUDA_HD bool equalsStrict(Nd4jLong *shapeA, Nd4jLong *shapeB); ND4J_EXPORT _CUDA_HD int sizeAt(Nd4jLong *shape, int dim); template <typename T> ND4J_EXPORT _CUDA_HD void fill(T* buffer, T value, Nd4jLong length); ND4J_EXPORT _CUDA_HD void traceNew(int id); ND4J_EXPORT _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength); ND4J_EXPORT _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength); ND4J_EXPORT _CUDA_HD bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShape, bool isFOrder); ND4J_EXPORT _CUDA_HD bool reshapeCF(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShape, bool isFOrder, Nd4jLong* target); /** * Get the shape info buffer * for the given rank and shape. */ ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, Nd4jLong *shape); ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, Nd4jLong *shape, Nd4jLong *buffer); /** * Get the shape info buffer * for the given rank and shape. */ ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, Nd4jLong *shape); ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, Nd4jLong *shape, Nd4jLong *output); //ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange, Nd4jLong *tmpBuffer); ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(int rank, Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer); #ifdef __CUDACC__ template <typename T> __device__ ND4J_EXPORT Nd4jLong *cuMalloc(Nd4jLong *buffer, long size, UnifiedSharedMemory *manager); __device__ ND4J_EXPORT Nd4jLong *cuMalloc(Nd4jLong *buffer, long size); #endif /** * Computes the standard packed array strides for a given shape. 
* * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank); ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret); /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank); ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret); ND4J_EXPORT _CUDA_HD void updateStrides(Nd4jLong *shape, const char order); // check whether input dimensions are permuted, not permuted dimensions order have to be 0,....,rank-1 template <typename T> ND4J_EXPORT _CUDA_HD bool isDimPermuted(const T* dimensions, const int dimSize); /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum); ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret); /** * Computes the standard packed array strides for a given shape. 
* * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum); ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret); /** * @param toCopy the shape to copy * @return a copy of the original struct */ ND4J_EXPORT _CUDA_HD ShapeInformation *shapeCopy( ShapeInformation *toCopy); ND4J_EXPORT _CUDA_HD bool strideDescendingCAscendingF(Nd4jLong *shapeBuffer); /** * Compute the element wise stride * for a given shape/stride configuration * @param rank the rank of the shape/stride * @param shape the shape * @param stride the stride * @param isFOrder 0 or 1 for whether the array is f * ordered or not * @return -1 if there is no element wise stride the * element wise stride of reshape(1,length) otherwise */ ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder); /** * Compute the element wise stride * for a given shape/stride configuration * @param rank the rank of the shape/stride * @param shape the shape * @param stride the stride * @param isFOrder 0 or 1 for whether the array is f * ordered or not * @return -1 if there is no element wise stride the * element wise stride of reshape(1,length) otherwise */ ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder, Nd4jLong *dimension, int dimensionLength); ND4J_EXPORT _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride); ND4J_EXPORT _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride, Nd4jLong *buffer); /** * * @param length * @param shape * @param rearrange * @return */ ND4J_EXPORT _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int* rearrange); 
/** * In place permute swap * @param length * @param shape * @param rearrange */ ND4J_EXPORT _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int* rearrange); ND4J_EXPORT _CUDA_HD Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange); ND4J_EXPORT _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int* rearrange, Nd4jLong *out); ND4J_EXPORT _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeBuffer, const int *rearrange); ND4J_EXPORT _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeBuffer, const Nd4jLong *rearrange); ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange); ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(int rank,Nd4jLong *shapeBuffer, int* rearrange); /** * Rearrange the permute indexes * according to which dimensions are specified. * * For example, dimension is implicitly: * 0,1,2 * * If you want to do a reduce along dimensions 0 and 1, * you need to permute the indexes to be: * 2,0,1 * * which will give us the ability to ierate along an element * wise stride. 
*/ ND4J_EXPORT _CUDA_HD Nd4jLong* createPermuteIndexes(int originalRank, int *dimension,int dimensionLength); ND4J_EXPORT _CUDA_HD Nd4jLong* computeResultShape(Nd4jLong *originalShapeBuffer, int *dimension,int dimensionLength); /** * This method does inplace transpose of given shapeBuffer * * @param shapeBuffer */ ND4J_EXPORT _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer); /** * Get the ordering for the device * @param length * @param shape * @param stride * @param elementStride * @return */ ND4J_EXPORT _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride); /** * Ensure that every value in the re arrange * array is unique * @param arr * @param shape * @param arrLength * @param shapeLength * @return */ template <typename T> ND4J_EXPORT _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength); /** * Permute the shape information * @param info the shape information to permute * @param rearrange the order to re arrange * @param rank the rank of the rearrange array */ ND4J_EXPORT _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank); /** * Returns whether the * given shape is a vector or not * @param shape the shape of the array * @param rank the rank of cthe shape */ ND4J_EXPORT _CUDA_HD int isVector(Nd4jLong *shape, int rank); /** * When 1 dimension is the whole length of the * array */ ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank); ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD int isVector(Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim); ND4J_EXPORT _CUDA_HD bool isRowVector(Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo); /** * Returns whether the * given shape is a vector or not * @param shape the shape of the array * @param rank the rank of the shape */ ND4J_EXPORT _CUDA_HD int isMatrix(Nd4jLong *shape, int rank); INLINEDEF _CUDA_HD 
int isMatrix(Nd4jLong *shapeInfo); /** * Returns the shape portion of an information * buffer */ ND4J_EXPORT _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer); /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ template <typename T> ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy); template <typename T> ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret); /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ template <typename T> ND4J_EXPORT _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to); /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ ND4J_EXPORT _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes); /** * Permute the given strides * in the given rearrange order * @param toPermute the buffer to permute * @param shapeRank the length of the buffer to permute * @param rearrange the rearrange order (must be 0 based indexes * and all must be filled in) * @return the rearranged array */ //ND4J_EXPORT _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, Nd4jLong *rearrange); /** * Return the slice (shape + 1 in pointer arithmetic) * @param shape the shape to take the slice of * @return the shape array - the first entry */ ND4J_EXPORT _CUDA_HD Nd4jLong *slice(Nd4jLong *shape); ND4J_EXPORT _CUDA_HD int slices(Nd4jLong *shapeBuffer); ND4J_EXPORT _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer); /** * Returns the length of the * shape information buffer: * rank * 2 + 3 * @param rank the rank to get the shape * info length for * @return rank * 2 + 4 */ ND4J_EXPORT _CUDA_HD int shapeInfoLength(int rank); ND4J_EXPORT _CUDA_HD int shapeInfoLength(Nd4jLong* shapeInfo); ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(int rank); ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(Nd4jLong* shapeInfo); /** * Returns the rank portion of * an 
information buffer */ ND4J_EXPORT _CUDA_HD int rank( Nd4jLong *buffer); /** * Converts a raw int buffer of the layout: * rank * shape * stride * offset * elementWiseStride * * where shape and stride are both straight int pointers */ ND4J_EXPORT _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer); /** * Returns the stride portion of an information * buffer */ ND4J_EXPORT _CUDA_HD Nd4jLong *stride(Nd4jLong *buffer); /** * Compute the length of the given shape */ ND4J_EXPORT _CUDA_HD bool isEmpty(Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD Nd4jLong length(Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape); ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape); /*** * Returns the offset portion of an information buffer */ ND4J_EXPORT _CUDA_HD Nd4jLong offset(Nd4jLong *buffer); ND4J_EXPORT _CUDA_HD Nd4jLong& extra(Nd4jLong *buffer); /** * Returns the ordering * for this shape information buffer */ ND4J_EXPORT _CUDA_HD char order(Nd4jLong *buffer); /** * Returns the element wise stride for this information * buffer */ ND4J_EXPORT _CUDA_HD Nd4jLong elementWiseStride(Nd4jLong *buffer); /** * Returns the element wise stride for this information * buffer * relative to a dimension and ordering for a reduction index */ ND4J_EXPORT _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong *buffer, int *dimension, int dimensionLength); /** * Returns whether * the given shape info buffer * represents a scalar shape */ ND4J_EXPORT _CUDA_HD int isScalar(Nd4jLong *info); /** * Returns whether * the given shape information * represents a scalar * shape or not */ ND4J_EXPORT _CUDA_HD int isScalar(volatile ShapeInformation *info); /** * Return a copy of this array with the * given index omitted * * @param data the data to copy * @param indexes the index of the item to remove * @param dataLength the length of the data array * @param indexesLength the length of the data array * @return the new array with the 
omitted * * item */ template <typename T1, typename T2> ND4J_EXPORT _CUDA_HD void removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *out); /** * Return a copy of this array with the * given index omitted * * @param data the data to copy * @param indexes the index of the item to remove * @param dataLength the length of the data array * @param indexesLength the length of the data array * @return the new array with the omitted * * item */ template <typename T1, typename T2> ND4J_EXPORT _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength); /** * Iterate over a given set of indexes * the begin and end indexes are 0 based. * 1 padding is automatically assumed for the ending. * * For example if you want to iterate over 0 to 4 * it will go to 4 rather than 3. * * indexes should be the indexes to exclude * indexes length should be the length of indexes */ ND4J_EXPORT _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes,int indexesLength,int begin,int end); /** * Computes the offset for accessing * a global element given the shape information * and the offset to be read. */ //#ifdef __CUDACC__ // __device__ //#endif // ND4J_EXPORT int tadOffset(shape::ShapeInformation *xInfo, int offset); /** * Returns a shape * forces the given length to be 2. 
* @param shape the shape to modify * @param dimension the dimension (row or column) * for the shape to be returned as * @return the new shape */ ND4J_EXPORT _CUDA_HD Nd4jLong* ensureVectorShape(Nd4jLong *shape); ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo(); ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret); /** * Generate an int buffer * up to the given length * at the specified increment * */ template <typename T> ND4J_EXPORT _CUDA_HD T* range(int from, int to, int increment); /** * Range between from and two with an * increment of 1 */ template <typename T> ND4J_EXPORT _CUDA_HD T* range(int from, int to); /** * Keep the given indexes * in the data */ ND4J_EXPORT _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength); /** * Generate reverse copy of the data * @param data * @param length * @return */ template <typename T> ND4J_EXPORT _CUDA_HD T* reverseCopy(T *data, Nd4jLong length); template <typename T> ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length); template <typename T> ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length); template <typename T1, typename T2> ND4J_EXPORT _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length); /** * * @param arr1 * @param arr1Length * @param arr2 * @param arr2Length * @return */ template <typename T> ND4J_EXPORT _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length); /** * * @param numArrays * @param numTotalElements * @param arr * @param lengths * @return */ template <typename T> ND4J_EXPORT _CUDA_HD T* concat(int numArrays, int numTotalElements, Nd4jLong **arr, Nd4jLong *lengths); /** * Get the length per slice of the * given shape and the dimension * @param rank the rank of the shape * @param shape the shape of to get * the length per slice for * @param dimension the dimension to * get the length per slice for * @param dimensionLength the length of the dimension array * 
@return the length per slice of the given shape * along the given dimension */ ND4J_EXPORT _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int *dimension, int dimensionLength); /** * calculates the offset for a tensor * @param index * @param arr * @param tensorShape * @return */ ND4J_EXPORT _CUDA_HD Nd4jLong sliceOffsetForTensor(int rank, int index, Nd4jLong *shape, Nd4jLong *tensorShape, int tensorShapeLength, int *dimension, int dimensionLength); /** * calculates the offset for a tensor * @param index * @param arr * @param tensorShape * @return */ ND4J_EXPORT _CUDA_HD Nd4jLong sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2); /** * Computes the tensor along dimension * offset * @param index the index to get the offset for the tad for * @param rank the rank of the shapes and strides * @param info the shape information to use for tad * @param dimension the dimensions to use for computing the tensor along dimensions */ // ND4J_EXPORT _CUDA_HD int offset(int index, // int rank, // shape::ShapeInformation *info, // Nd4jLong *dimension, // int dimensionLength); /** * Computes the number * of tensors along * a given dimension */ ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(int rank, volatile int length, volatile Nd4jLong *shape, int *dimension, int dimensionLength); /** * Computes the number * of tensors along * a given dimension */ ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength); /** * Returns the tensor along dimension * for the given block index * @param blockSize * @param blockIdx * @param i * @return */ ND4J_EXPORT _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i); /** * Computes the number of tads per block * */ ND4J_EXPORT _CUDA_HD int tadsPerBlock(int blockSize, int tads); // ND4J_EXPORT _CUDA_HD Nd4jLong *tadShapeInfo(int index, Nd4jLong *xShapeInfo, Nd4jLong *dimension, // int dimensionLength); /** * Returns a shape buffer * for the shape 
information metadata. */ ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info); ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret); /** * Returns the number of elements per thread */ //#ifdef __CUDACC__ // __device__ //#endif // int numElementsPerThread(int N); /** * Returns the block starting index */ //#ifdef __CUDACC__ // __device__ //#endif // int blockStartingIndex(int N); /** * Returns the thread starting index */ //#ifdef __CUDACC__ // __device__ //#endif // int threadStartingIndex(int N, int stride, int offset); /** * Returns the thread ending index */ //#ifdef __CUDACC__ // __device__ //#endif // int threadEndingIndex(int N, int stride, int offset); /** * Returns indexing information * for the current kernel invocation */ //#ifdef __CUDACC__ // __device__ //#endif // CurrentIndexing *currentIndex(int N, int offset, int stride); /** Given an linear index, element wise stride * and the length of each tad * map a linear index to a tad * @param i the index to map * @param the element wise stride for the tads * @param numElementsPerTad the number of elements * per tad */ ND4J_EXPORT _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad); /** * Map a tad to a * reduction index. * @param tadIndexForOriginal the original tad index for the * split up problem (eg: split is dimension 3 mapping to a 2,3 problem) * @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3) * @param tadsForOriginal the number of tads for the smaller problem (eg: 3) */ ND4J_EXPORT _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced, int tadsForOriginal); /** * Computes the number of tads * per reduce index for the * reduction tad. 
*/ ND4J_EXPORT _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal); /** * Maps a linear index to a reduction index * @param i the linear index to map * @param elementWiseStride the element wise stride * for the multiple problem * @param tadNum the number of tads for the shrunken problem * @param originalTadNum the tad number for the reduced version of the problem */ ND4J_EXPORT _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad, int tadNum, int originalTadNum); /** * Returns the prod of the data * up to the given length */ ND4J_EXPORT _CUDA_HD int prod(Nd4jLong *data, int length); ND4J_EXPORT _CUDA_HD Nd4jLong prodLong( Nd4jLong *data, int length); /** * Returns the rear most left over item not present in * the dimension array. This assumes that the dimension array is sorted. * * For example, given a dimension array of: * 0,2 * * and * * 12,4,2,1 in data * * You end up with 1 (data[3]) * since the first item won't match * the last item of the dimension array */ // ND4J_EXPORT _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data,int length,Nd4jLong *dimension,int dimensionLength); /** * Get an offset for retrieval * from a data buffer * based on the given * shape stride and given indices * @param baseOffset the offset to start from * @param shape the shape of the array * @param stride the stride of the array * @param indices the indices to iterate over * @return the double at the specified index */ ND4J_EXPORT _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, Nd4jLong *shape, Nd4jLong *stride, Nd4jLong *indices,int rank); ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank); ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically 
prod of shape( * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices); ND4J_EXPORT _CUDA_HD Nd4jLong *ind2sub(int rank, Nd4jLong *shape, Nd4jLong index); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD void ind2sub(int rank,Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices,Nd4jLong *out); /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. * * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD void ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong *out); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD Nd4jLong* ind2subC(int rank, Nd4jLong *shape, Nd4jLong index); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD Nd4jLong* ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD void ind2subC(int rank, Nd4jLong *shape, 
Nd4jLong index, Nd4jLong numIndices, Nd4jLong *out); /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. * * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD void ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong *out); /** * Convert the given index (such as 1,1) * to a linear index * @param shape the shape of the indexes to convert * @param indices the index to convert * @return the linear index given the shape * and indices */ ND4J_EXPORT _CUDA_HD int sub2Ind(int rank, Nd4jLong *shape, Nd4jLong *indices); /** * Compute the real linear indices for the given shape and stride */ ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices(int rank, Nd4jLong *shape, Nd4jLong *stride); /** * Compute the real linear indices for the * given shape buffer. Shape,stride and rank are derived * from the buffer */ ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices( Nd4jLong *shapeBuffer); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong numIndices,Nd4jLong *out); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ ND4J_EXPORT _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index,Nd4jLong *out); ND4J_EXPORT _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const char *msg, Nd4jLong *shapeInfo); ND4J_EXPORT 
_CUDA_HD void printShapeInfoLinear(const char *msg, int rank, Nd4jLong *shape, Nd4jLong *strides);

    ND4J_EXPORT _CUDA_HD void printIntArray(Nd4jLong *arr,int length);

    ND4J_EXPORT _CUDA_HD void printArray(float *arr,int length);

    ND4J_EXPORT _CUDA_HD Nd4jLong* shapeBufferOfNpy(int rank, unsigned int *shape,bool fortranOrder);

    ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr);

//    ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer);

    // this function checks the consistency of dimensions with the array rank
    // (negative dimensions, too-large dimensions, too big a number of dimensions)
    // and also sorts the input array of dimensions; this is also necessary for creating a TAD object
    ND4J_EXPORT _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions);

    // return absolute index of array min; min is a sub-array of max, the index returned
    // is a min index and corresponds to maxIdx of the max array
    ND4J_EXPORT _CUDA_HD Nd4jLong subArrayIndex(const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int maxIdx);

    ND4J_EXPORT _CUDA_HD void shapeScalar(Nd4jLong* const buffer);

    ND4J_EXPORT _CUDA_HD void shapeVector(const Nd4jLong length, Nd4jLong* const buffer);

    ND4J_EXPORT _CUDA_HD void shapeOldScalar(Nd4jLong* const buffer, const char order);

//END HEADERS


//BEGIN IMPLEMENTATIONS


#ifdef __CUDACC__
    /**
     * Device-side scratch allocator. Small requests (<= MAX_COORD * 4) get a
     * fresh device-heap allocation; larger ones take a per-thread slot inside
     * the preallocated `buffer`, falling back to device malloc when that slot
     * would run past PREALLOC_SIZE.
     */
    template <typename T>
    __device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size, UnifiedSharedMemory *manager) {
        // if we go for 3 dimensions coord space or below - just use shared memory for that
        // (NOTE(review): shared-memory path is commented out; this actually uses device new)
        if (size <= MAX_COORD * 4) {
            Nd4jLong *ptr = new Nd4jLong[size / 4];//manager->getSharedCoordBuffer() + (threadIdx.x * MAX_COORD);
            return ptr;
        } else {
            // otherwise go to preallocated global memory :(
            int tid = blockIdx.x * blockDim.x + threadIdx.x;
            if (tid * size > PREALLOC_SIZE - size) {
                return (Nd4jLong *) malloc(size);
            } else {
                Nd4jLong *ret = buffer;
                ret += (tid * size);
                return ret;
            }
        }
    }
#endif

#ifdef __CUDACC__
/**
 * BEWARE: THIS METHOD DOES NOT CHECK ALLOCATION BOUNDARIES.
 *
 * Hands each thread a slot of `size` longs inside `buffer`,
 * offset by threadIdx.x only (no block offset applied).
 */
    __device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size) {
        Nd4jLong *ret = buffer;
        ret += (threadIdx.x * size);
        return ret;
    }
#endif

/**
 * Length of a tad given the shape information:
 * the product of the shape extents over the requested dimensions.
 */
    INLINEDEF _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) {
        if(dimensionLength == 1) {
            return shape::shapeOf(shapeInfo)[dimension[0]];
        }
        else {
            int ret = 1;
            // multiply the extent of every dimension listed in `dimension`;
            // scanning i over the full rank means out-of-range entries in
            // `dimension` never match and are silently skipped
            for(int i = 0; i < shape::rank(shapeInfo); i++) {
                for(int j = 0; j < dimensionLength; j++) {
                    if(i == dimension[j])
                        ret *= shape::shapeOf(shapeInfo)[dimension[j]];
                }
            }
            return ret;
        }
    }

/**
 * Tad element wise stride:
 * given the inner most dimension (the sorted dimension of the last)
 * the element wise stride of the tad (disregarding order) is the
 * last dimension's stride.
 *
 * For a given singular dimension this will just be the only entry.
 * For example, given the following c order shape/stride:
 * 2,2,3,2
 * 12,6,2,1
 *
 * The tad element wise stride for 3 will be 1.
 * For zero it will be 12
 *
 * For 2,3 it's 1
 *
 * Note here that the multi dimensional 2,3 case
 * is equivalent to the singular 3 case.
 *
 * Note that this is for the dimension that ultimately
 * ends up removed.
 *
 * Again: this may not preserve ordering of the tad
 * but may be used for reductions.
*/
// Delegates to the reduction-index helper; see the contract in the comment above.
INLINEDEF _CUDA_HD int tadElementWiseStride(Nd4jLong *shapeInfo, int *dimension,int dimensionLength) {
    return reductionIndexElementWiseStride(shapeInfo,dimension,dimensionLength);
}

// Element-wise comparison of two shape arrays; false as soon as ranks differ.
INLINEDEF _CUDA_HD bool shapeEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2) {
    if(shape1Rank != shape2Rank)
        return false;
    //rank not equals
    for(int i = 0; i < shape1Rank; i++) {
        if(shape1[i] != shape2[i])
            return false;
    }

    return true;
}

// Convenience overload: compare the shape portions of two full shape-info buffers.
INLINEDEF _CUDA_HD bool shapeEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2) {
    return shape::shapeEquals(shape::rank(shapeInfo1),shape::shapeOf(shapeInfo1),shape::rank(shapeInfo2),shape::shapeOf(shapeInfo2));
}

// Element-wise comparison of two stride arrays (same algorithm as shapeEquals).
INLINEDEF _CUDA_HD bool strideEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2) {
    if(shape1Rank != shape2Rank)
        return false;
    //rank not equals
    for(int i = 0; i < shape1Rank; i++) {
        if(shape1[i] != shape2[i])
            return false;
    }

    return true;
}

// Convenience overload: compare the stride portions of two full shape-info buffers.
INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2) {
    return shape::strideEquals(shape::rank(shapeInfo1),shape::stride(shapeInfo1),shape::rank(shapeInfo2),shape::stride(shapeInfo2));
}

// Raw stride comparison with explicit ranks.
INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *stride1,int rank1 , Nd4jLong *stride2, int rank2) {
    if(rank1 != rank2)
        return false;

    for(int i = 0; i < rank1; i++) {
        if(stride1[i] != stride2[i])
            return false;
    }

    return true;
}

/**
 * Builds the shape-info buffer that results from reducing `originalShapeBuffer`
 * along `dimension`. The caller owns the returned buffer (allocated with new[]).
 * The sentinel 2147483647 (INT_MAX) as the only dimension means "reduce all"
 * and yields a 1x1 result.
 */
INLINEDEF _CUDA_HD Nd4jLong *computeResultShape(Nd4jLong *originalShapeBuffer, int* dimension,int dimensionLength) {
    Nd4jLong *retShape;
    int retShapeLength;
    if(dimensionLength == 1 && dimension[0] == 2147483647) {
        retShape = new Nd4jLong[2];
        retShape[0] = 1;
        retShape[1] = 1;
        retShapeLength = 2;
    }
    else {
        // drop the reduced axes from the original shape
        retShape = shape::removeIndex<Nd4jLong, int>(shape::shapeOf(originalShapeBuffer), dimension, shape::shapeInfoLength(shape::rank(originalShapeBuffer)), dimensionLength);
        retShapeLength = shape::rank(originalShapeBuffer) - dimensionLength;
    }
    //ensure vector is proper shape
    if (retShapeLength == 1) {
        if (dimension[0] == 0) {
            auto
newRetShape = new Nd4jLong[2]{1, retShape[0]};
            delete[] retShape;
            retShape = newRetShape;
            retShapeLength = 2;
        }
        else {
            // reduced along a later axis: promote to a column-style 2d shape {n, 1}
            auto newRetShape = new Nd4jLong[2]{retShape[0], 1};
            delete[] retShape;
            retShape = newRetShape;
            retShapeLength = 2;
        }
    }
    else if (retShapeLength == 0) {
        // full reduction collapsed every axis: represent the scalar as 1x1
        auto newRetShape = new Nd4jLong[2]{1, 1};
        delete[] retShape;
        retShape = newRetShape;
        retShapeLength = 2;
    }

    auto ret = shape::shapeBuffer(retShapeLength,retShape);
    delete[] retShape;

    return ret;
}

/**
 * Writes into `buffer` a shape-info whose shape/stride are the ones of
 * `shapeInfo` restricted to the axes in `dimension`. A single dimension is
 * expanded to a rank-2 (vector-style) result; otherwise the selected
 * shape/stride entries are copied (optionally with strides reverse-copied).
 * Returns `buffer`.
 */
INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride, Nd4jLong *buffer) {
    Nd4jLong *theShape = shape::shapeOf(shapeInfo);
    Nd4jLong *theStride = shape::stride(shapeInfo);
    // a single dimension still produces a rank-2 (vector) shape-info
    int rank = dimensionLength == 1 ? 2 : dimensionLength;
    Nd4jLong *ret = buffer;
    //set the rank
    ret[0] = rank;
    Nd4jLong *retShape = shape::shapeOf(ret);
    Nd4jLong *retStride = shape::stride(ret);
    int len = rank;

    if(dimensionLength == 1) {
        if(shape::isMatrix(theShape,shape::rank(shapeInfo))) {
            // NOTE(review): both matrix branches below are identical; presumably one
            // was meant to emit {1, n} for dimension 0 — verify against callers.
            if(dimension[0] == 0) {
                Nd4jLong newStride[2] = {theStride[dimension[0]],1};
                Nd4jLong newShape[2] = {theShape[dimension[0]],1};
                retShape[0] = newShape[0];
                retShape[1] = newShape[1];
                retStride[0] = newStride[0];
                retStride[1] = newStride[1];
            }
            else {
                Nd4jLong newStride[2] = {theStride[dimension[0]],1};
                Nd4jLong newShape[2] = {theShape[dimension[0]],1};
                retShape[0] = newShape[0];
                retShape[1] = newShape[1];
                retStride[0] = newStride[0];
                retStride[1] = newStride[1];
            }
        }
        else {
            // non-matrix input: lay the selected axis out as a row vector {1, n}
            Nd4jLong newStride[2] = {1,theStride[dimension[0]]};
            Nd4jLong newShape[2] = {1,theShape[dimension[0]]};
            retShape[0] = newShape[0];
            retShape[1] = newShape[1];
            retStride[0] = newStride[0];
            retStride[1] = newStride[1];
        }
    }
    else {
        // multi-dimension case: gather shape/stride entries in `dimension` order
        Nd4jLong *newIndexes = dimension;
        if(reverseCopyStride)
            shape::reverseCopyTo(theStride, retStride, newIndexes, len);
        else
            shape::copyTo(len, theStride, retStride, newIndexes);
        shape::copyTo(len, theShape, retShape, newIndexes);
    }

    // carry the source ordering flag into the last slot of the new shape-info
    ret[shape::shapeInfoLength(rank) - 1] =
shape::order(shapeInfo);

    return ret;
}

// Allocating overload: news a shape-info of the proper length and delegates to
// the buffer-based version above. Caller owns the returned buffer.
INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride) {
    int rank = dimensionLength == 1 ? 2 : dimensionLength;

    traceNew(4);

    Nd4jLong *ret = new Nd4jLong[shape::shapeInfoLength(rank)];
    return shapeInfoOnlyShapeAndStride(shapeInfo, dimension, dimensionLength, reverseCopyStride, ret);
}

// Allocating overload of createShapeInfo below; caller owns the result.
INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank) {

    traceNew(5);

    Nd4jLong *ret = new Nd4jLong[shape::shapeInfoLength(rank)];

    return createShapeInfo(shape, stride, rank, ret);
}

// Fills `buffer` with [rank | shape... | stride...]; the trailing
// offset/ews/order slots of the shape-info are NOT written here.
INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer) {
    buffer[0] = rank;
    Nd4jLong *retShape = shape::shapeOf(buffer);
    Nd4jLong *retStride = shape::stride(buffer);
    for(int i = 0;i < rank; i++) {
        retShape[i] = shape[i];
        retStride[i] = stride[i];
    }

    return buffer;
}

/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*
* Fortran (column-major) variant: strides grow from the first axis.
* Caller owns the returned new[] buffer. Vectors always get strides {1,1}.
*/
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum) {
    if (isVector(shape, rank)) {

        traceNew(5);

        Nd4jLong *ret = new Nd4jLong[2];
        for (int i = 0; i < 2; i++)
            ret[i] = 1;
        return ret;

    }

    int dimensions = rank;

    traceNew(6);

    Nd4jLong *stride = new Nd4jLong[dimensions];
    int st = startNum;
    // column-major: leftmost axis is densest
    for (int j = 0; j < rank; j++) {
        stride[j] = st;
        st *= shape[j];
    }

    return stride;
}

// In-place variant writing into caller-provided `ret`.
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong *ret) {
    if (isVector(shape, rank)) {
        for (int i = 0; i < 2; i++)
            ret[i] = 1;
        return ret;

    }

    int dimensions = rank;

    int st = startNum;
    for (int j = 0; j < rank; j++) {
        ret[j] = st;
        st *= shape[j];
    }

    return ret;
}


/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*
* C (row-major) variant: strides grow from the last axis.
* Caller owns the returned new[] buffer. Vectors always get strides {1,1}.
*/
INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum) {

    traceNew(7);

    Nd4jLong *stride = new Nd4jLong[rank];

    if (rank == 1) {
        stride[0] = 1;
        return stride;
    }

    if (shape::isVector(shape, rank)) {
        for (int i = 0; i < 2; i++)
            stride[i] = 1;
        return stride;

    }

    int st = startNum;
    // row-major: rightmost axis is densest
    for (int j = rank - 1; j >= 0; j--) {
        stride[j] = st;
        st *= shape[j];
    }

    return stride;
}

// In-place variant writing into caller-provided `ret`.
INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret) {
    if (rank == 1) {
        ret[0] = 1;
        return ret;
    }

    if (shape::isVector(shape, rank)) {
        for (int i = 0; i < 2; i++)
            ret[i] = 1;
        return ret;

    }

    int st = startNum;
    for (int j = rank - 1; j >= 0; j--) {
        ret[j] = st;
        st *= shape[j];
    }

    return ret;
}

/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*
* Convenience wrappers starting the stride accumulation at 1.
*/
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank) {
    return calcStridesFortran(shape, rank, 1);
}

INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret) {
    return calcStridesFortran(shape, rank, 1, ret);
}

/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*
* Convenience wrappers for C-order strides starting at 1.
*/
INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank) {
    return calcStrides(shape, rank, 1);
}

INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret) {
    return calcStrides(shape, rank, 1, ret);
}

// Recomputes the stride section of a full shape-info buffer in place for the
// given order, then resets the trailing offset/ews/order slots.
// `shape` here is the whole shape-info: [rank | shape... | stride... | offset | ews | order].
INLINEDEF _CUDA_HD void updateStrides(Nd4jLong *shape, const char order) {
    int rank = shape[0];
    int doubleRank = 2*rank;
    if (rank > 0)
        if(order == 'c') {
            shape[doubleRank] = 1;          // set unity as last stride for c order
            for(int j=1; j<rank; ++j)
                shape[doubleRank-j] = shape[doubleRank-j+1]*shape[rank+1-j];
        }
        else {
            shape[rank+1] = 1;             // set unity as first stride for f order
            for(int j=rank+1; j<doubleRank; ++j)
                shape[j+1] = shape[j]*shape[j-rank];
        }
    // set last 3 elements in shape
    shape[doubleRank + 1] = 0;             // offset
    shape[doubleRank + 2] = 1;             // element-wise stride
    shape[doubleRank + 3] = (int)order;    // order char
}

// check whether input dimensions are permuted, not permuted dimensions order have to be 0,....,rank-1
template <typename T>
INLINEDEF _CUDA_HD bool isDimPermuted(const T* dimensions, const Nd4jLong dimSize ) {
    // any descending adjacent pair means the dimension list is not sorted
    for(int i=0; i<dimSize-1; ++i)
        if(dimensions[i] > dimensions[i+1])
            return true;

    return false;
}

/**
* @param toCopy the shape to copy
* @return a copy of the original struct
*
* Deep copy: shape and stride arrays are new[]-allocated; the caller owns the
* returned ShapeInformation and its arrays.
*/
INLINEDEF _CUDA_HD ShapeInformation *shapeCopy( ShapeInformation *toCopy) {
    auto copy = new ShapeInformation;

    traceNew(8);

    copy->shape = new Nd4jLong[toCopy->rank];

    memcpy(copy->shape, toCopy->shape, toCopy->rank * sizeof(Nd4jLong));

    traceNew(9);

    copy->stride = new Nd4jLong[toCopy->rank];
    for (int i = 0; i < toCopy->rank; i++) {
        copy->stride[i] = toCopy->stride[i];
    }
    copy->order = toCopy->order;
    copy->rank = toCopy->rank;
    copy->offset = toCopy->offset;
    copy->elementWiseStride = toCopy->elementWiseStride;
    return copy;
}

// Determines whether the whole array can be walked with a single stride
// (returns that stride) or not (returns -1). Mirrors numpy's no-copy-reshape
// contiguity check; continues on the next chunk.
INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder) {
    if (rank == 0)
        return
1;

    if(shape::isVector(shape,rank)) {
        return stride[rank - 1];
    }

    else {
        int oldnd;
        Nd4jLong *olddims = shape::copyOf(rank, shape);
        Nd4jLong *oldstrides = shape::copyOf(rank, stride);
        int np, op, last_stride;
        int oi, oj, ok, ni, nj, nk;

        traceNew(10);

        auto newStrides = new Nd4jLong[rank];
        oldnd = 0;
        //set the shape to be 1 x length
        int newShapeRank = 2;
        auto newShape = new Nd4jLong[newShapeRank];
        newShape[0] = 1;
        newShape[1] = shape::prodLong(shape, rank);

        /*
        * Remove axes with dimension 1 from the old array. They have no effect
        * but would need special cases since their strides do not matter.
        */
        for (oi = 0; oi < rank; oi++) {
            if (shape[oi] != 1) {
                olddims[oldnd] = shape[oi];
                oldstrides[oldnd] = stride[oi];
                oldnd++;
            }
        }

        np = 1;
        for (ni = 0; ni < newShapeRank; ni++) {
            np *= newShape[ni];
        }
        op = 1;
        for (oi = 0; oi < oldnd; oi++) {
            op *= olddims[oi];
        }
        if (np != op) {
            /* different total sizes; no hope */
            delete[] newStrides;
            delete[] newShape;
            delete[] oldstrides;
            delete[] olddims;
            return -1;
        }

        if (np == 0) {
            /* the current code does not handle 0-sized arrays, so give up */
            delete[] newStrides;
            delete[] newShape;
            delete[] oldstrides;
            delete[] olddims;
            return -1;
        }

        /* oi to oj and ni to nj give the axis ranges currently worked with */
        oi = 0;
        oj = 1;
        ni = 0;
        nj = 1;
        while (ni < newShapeRank && oi < oldnd) {
            np = newShape[ni];
            op = olddims[oi];

            // grow one of the two axis runs until their element counts match
            while (np != op) {
                if (np < op) {
                    /* Misses trailing 1s, these are handled later */
                    np *= newShape[nj++];
                } else {
                    op *= olddims[oj++];
                }
            }

            /* Check whether the original axes can be combined */
            for (ok = oi; ok < oj - 1; ok++) {
                if (isFOrder) {
                    if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) {
                        /* not contiguous enough */
                        delete[] newStrides;
                        delete[] newShape;
                        delete[] oldstrides;
                        delete[] olddims;
                        return -1;
                    }
                } else {
                    /* C order */
                    if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) {
                        /* not contiguous enough */
                        delete[] newStrides;
                        delete[] newShape;
                        delete[] oldstrides;
                        delete[] olddims;
                        return -1;
                    }
                }
            }

            /*
Calculate new strides for all axes currently worked with */
            if (isFOrder) {
                newStrides[ni] = oldstrides[oi];
                for (nk = ni + 1; nk < nj; nk++) {
                    newStrides[nk] = newStrides[nk - 1] * newShape[nk - 1];
                }
            } else {
                /* C order */
                newStrides[nj - 1] = oldstrides[oj - 1];
                for (nk = nj - 1; nk > ni; nk--) {
                    newStrides[nk - 1] = newStrides[nk] * newShape[nk];
                }
            }
            ni = nj++;
            oi = oj++;
        }

        /*
        * Set strides corresponding to trailing 1s of the new shape.
        */
        if (ni >= 1) {
            last_stride = newStrides[ni - 1];
        } else {
            last_stride = stride[rank - 1];
        }
        if (isFOrder) {
            if (ni >= 1)
                last_stride *= newShape[ni - 1];
        }
        for (nk = ni; nk < newShapeRank; nk++) {
            newStrides[nk] = last_stride;
        }

        //returns the last element of the new stride array
        int ret = last_stride;
        delete[] newStrides;
        delete[] newShape;
        delete[] oldstrides;
        delete[] olddims;

        return ret;
    }
}

// Dimension-restricted variant: only the single-dimension case is supported;
// anything else reports "no single element-wise stride" (-1).
INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder, Nd4jLong *dimension, int dimensionLength) {
    if(dimensionLength == 1) {
        return stride[dimension[0]];
    }
    return -1;
}

/**
* Get the shape info buffer
* for the given rank and shape.
*
* Builds a c-order shape-info from `shape` (strides computed here).
* Caller owns the returned buffer. Note: `shape` itself is not copied into
* the ShapeInformation; it is only read while serializing to the buffer.
*/
INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, Nd4jLong *shape) {
    Nd4jLong *stride = shape::calcStrides(shape, rank);

    traceNew(11);

    auto shapeInfo = new shape::ShapeInformation();
    shapeInfo->shape = shape;
    shapeInfo->stride = stride;
    shapeInfo->offset = 0;
    shapeInfo->rank = rank;
    int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);
    shapeInfo->order = 'c';
    shapeInfo->elementWiseStride = elementWiseStride;
    auto shapeInfoBuffer = shape::toShapeBuffer(shapeInfo);
    delete[] stride;
    delete shapeInfo;
    return shapeInfoBuffer;
}

/**
* This is special method, it returns ONLY 2D shapebuffer.
*
* This method is used only for SoftMax
*
* Stack-based variant of shapeBuffer: strides live in a local MAX_RANK array
* and the result is serialized into caller-provided `buffer`.
*/
INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, Nd4jLong *shape, Nd4jLong *buffer) {
    Nd4jLong stride[MAX_RANK];
    shape::calcStrides(shape,rank, stride);

    shape::ShapeInformation shapeInfo;
    shapeInfo.shape = shape;
    shapeInfo.stride = stride;
    shapeInfo.offset = 0;
    shapeInfo.rank = rank;
    auto elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);

    shapeInfo.order = 'c';
    shapeInfo.elementWiseStride = elementWiseStride;
    shape::toShapeBuffer(&shapeInfo, buffer);
    return buffer;
}

/**
* Get the shape info buffer
* for the given rank and shape.
*
* Fortran-order (column-major) counterpart of shapeBuffer above.
* Caller owns the returned new[] buffer.
*/
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, Nd4jLong *shape) {
    auto stride = shape::calcStridesFortran(shape,rank);

    traceNew(12);

    auto shapeInfo = new shape::ShapeInformation();
    shapeInfo->shape = shape;
    shapeInfo->stride = stride;
    shapeInfo->offset = 0;
    shapeInfo->rank = rank;
    int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);

    shapeInfo->order = 'f';
    shapeInfo->elementWiseStride = elementWiseStride;
    auto shapeInfoBuffer = shape::toShapeBuffer(shapeInfo);
    delete[] stride;
    delete shapeInfo;
    return shapeInfoBuffer;
}

// Buffer-based fortran-order variant (no heap allocation beyond the output).
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, Nd4jLong *shape, Nd4jLong *output) {
    Nd4jLong stride[MAX_RANK];
    shape::calcStridesFortran(shape,rank, stride);

    shape::ShapeInformation shapeInfo;
    shapeInfo.shape = shape;
    shapeInfo.stride = stride;
    shapeInfo.offset = 0;
    shapeInfo.rank = rank;
    auto elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);

    shapeInfo.order = 'f';
    shapeInfo.elementWiseStride = elementWiseStride;
    shape::toShapeBuffer(&shapeInfo, output);
    return output;
}

/**
* Compute the real linear indices for the given shape and stride
*
* For every logical position, records the physical offset computed from the
* strides. Caller owns the returned new[] array of length prod(shape).
*/
INLINEDEF _CUDA_HD Nd4jLong *computeIndices(int rank, Nd4jLong *shape, Nd4jLong *stride) {
    Nd4jLong length = shape::prodLong(shape,rank);

    traceNew(13);

    Nd4jLong *ret = new Nd4jLong[length];
    for(int i = 0; i < length; i++) {
        Nd4jLong *idx
= shape::ind2sub(rank, shape, i);
        ret[i] = shape::getOffset(0, shape, stride, idx, rank);
        delete[] idx;
    }

    return ret;
}

/**
* Compute the real linear indices for the given shape and stride
*
* Convenience overload reading rank/shape/stride out of a shape-info buffer.
*/
INLINEDEF _CUDA_HD Nd4jLong *computeIndices(Nd4jLong *shapeBuffer) {
    return computeIndices(shape::rank(shapeBuffer),shape::shapeOf(shapeBuffer),shape::stride(shapeBuffer));
}

/**
* Convert the given index (such as 1,1)
* to a linear index
* @param shape the shape of the indexes to convert
* @param indices the index to convert
* @return the linear index given the shape
* and indices
*
* Column-major accumulation: the first index varies fastest.
*/
INLINEDEF _CUDA_HD int sub2Ind(int rank, Nd4jLong *shape, Nd4jLong *indices) {
    int index = 0;
    int shift = 1;

    for(int i = 0; i < rank; i++) {
        index += shift * indices[i];
        shift *= shape[i];
    }
    return index;
}

// Fill `length` elements of `buffer` with `value` (vectorizable loop).
template <typename T>
INLINEDEF _CUDA_HD void fill(T* buffer, T value, Nd4jLong length) {

#pragma omp simd
    for (int e = 0; e < length; e++)
        buffer[e] = value;
}

/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*
* Allocating variant; caller owns the returned new[] array of length `rank`.
*/
INLINEDEF _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices) {
    auto ret = new Nd4jLong[rank];
    ind2sub(rank, shape, index, numIndices, ret);
    return ret;
}

/**
* Convert a linear index to
* the equivalent nd index.
* Infers the number of indices from the specified shape.
*
* @param shape the shape of the dimensions
* @param index the index to map
* @return the mapped indexes along each dimension
*/
INLINEDEF _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, Nd4jLong index) {
    return ind2sub(rank,shape, index, shape::prodLong(shape,rank));
}

/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*
* Writes into caller-provided `ret`; walks axes from last to first.
* NOTE(review): unlike ind2subC below, this has no guard against denom
* reaching 0 (possible with a 0-sized axis) — confirm callers never pass one.
*/
INLINEDEF _CUDA_HD void ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *ret) {
    int denom = numIndices;

    for(int i = rank - 1; i >= 0; i--) {
        denom /= shape[i];
        ret[i] = index / denom;
        index %= denom;
    }
}

/**
* Convert a linear index to
* the equivalent nd index.
* Infers the number of indices from the specified shape.
*
* @param shape the shape of the dimensions
* @param index the index to map
* @return the mapped indexes along each dimension
*/
INLINEDEF _CUDA_HD void ind2sub(int rank,Nd4jLong *shape, Nd4jLong index, Nd4jLong *out) {
    ind2sub(rank,shape, index, shape::prodLong(shape,rank),out);
}

/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*
* C-order (row-major) allocating variant; caller owns the result.
*/
INLINEDEF _CUDA_HD Nd4jLong * ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices) {
    auto ret = new Nd4jLong[rank];
    ind2subC(rank, shape, index, numIndices, ret);
    return ret;
}

/**
* Convert a linear index to
* the equivalent nd index.
* Infers the number of indices from the specified shape.
*
* @param shape the shape of the dimensions
* @param index the index to map
* @return the mapped indexes along each dimension
*/
INLINEDEF _CUDA_HD Nd4jLong *ind2subC(int rank, Nd4jLong *shape, Nd4jLong index) {
    return ind2subC(rank,shape, index, shape::prodLong(shape,rank));
}

/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*
* C-order in-place variant: walks axes first-to-last; a non-positive denom
* (0-sized axis) yields 0 for the remaining coordinates.
*/
INLINEDEF _CUDA_HD void ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *ret) {
    auto denom = numIndices;

    for(int i = 0; i < rank; i++) {
        denom /= shape[i];
        if(denom > 0) {
            ret[i] = index / denom;
            index %= denom;
        }
        else
            ret[i] = 0;
    }
}

/**
* Convert a linear index to
* the equivalent nd index.
* Infers the number of indices from the specified shape.
*
* @param shape the shape of the dimensions
* @param index the index to map
* @return the mapped indexes along each dimension
*/
INLINEDEF _CUDA_HD void ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong *out) {
    ind2subC(rank,shape, index,shape::prodLong(shape,rank),out);
}

/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*
* Dispatches to the f-order or c-order conversion based on the buffer's order flag.
*/
INLINEDEF _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *out) {
    if(shape::order(shapeInfo) == 'f') {
        shape::ind2sub(
                shape::rank(shapeInfo),
                shape::shapeOf(shapeInfo),
                index,
                numIndices,
                out);
    }
    else {
        shape::ind2subC(
                shape::rank(shapeInfo),
                shape::shapeOf(shapeInfo),
                index,
                numIndices,
                out);

    }
}

/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices
(typically prod of shape(
* @return the mapped indexes along each dimension
*/
INLINEDEF _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong *out) {
    ind2subOrder(shapeInfo,index,shape::length(shapeInfo),out);
}

/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/


/**
*
* @param length
* @param shape
* @param rearrange
* @return
*
* Returns a new[] array whose i-th element is shape[rearrange[i]];
* caller owns the result.
*/
INLINEDEF _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int *rearrange) {

    traceNew(16);

    Nd4jLong *ret = new Nd4jLong[length];
    for (int i = 0; i < length; i++) {
        ret[i] = shape[rearrange[i]];
    }

    return ret;
}

/**
*
* @param length
* @param shape
* @param rearrange
* @return
*
* In-place permutation of *shape according to rearrange.
* Short-circuits when the array is trivial (length 1 / fewer than 2 elements)
* or when rearrange is the identity.
*/
INLINEDEF _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int *rearrange) {
    if(length == 1) {
        return;
    }
    else {
        Nd4jLong *shapeDeref = *shape;
        if(shape::prodLong(shapeDeref,length) < 2) {
            return;
        }
    }

    bool inOrder = true;
    for(int i = 0; i < length - 1; i++) {
        inOrder = inOrder && rearrange[i] + 1 == rearrange[i + 1];
    }

    //all in order, nothing to do
    if(inOrder)
        return;


    Nd4jLong *shapeDeref = *shape;
    //we know they are just reversed, dimension length of 2
    if(length == 2) {
        auto shapeFirst = shapeDeref[0];
        auto shapeSecond = shapeDeref[1];
        shapeDeref[0] = shapeSecond;
        shapeDeref[1] = shapeFirst;
        return;
    }
    else if(length == 1) {
        //no permute
        return;
    }

    // general case: gather through a temporary copy
    auto temp = new Nd4jLong[length];
    memcpy(temp,shapeDeref,sizeof(Nd4jLong) * length);
    for (int i = 0; i < length; i++) {
        shapeDeref[i] = temp[rearrange[i]];
    }

    delete[] temp;
}

// Copies shapeBuffer into out (when distinct) and permutes out in place.
INLINEDEF _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *out) {
    if(shapeBuffer != out)
        memcpy(out,shapeBuffer,sizeof(Nd4jLong) * shape::shapeInfoLength(shape::rank(shapeBuffer)));

    doPermuteShapeBuffer(shape::rank(shapeBuffer), shapeBuffer, rearrange, out);
}
// Allocating permute: deep-copies the shape-info, permutes the copy in place
// and returns it. Caller owns the returned new[] buffer.
INLINEDEF _CUDA_HD Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange) {
    auto len = shape::shapeInfoLength(shape::rank(shapeBuffer));
    Nd4jLong *copy = shape::copyOf(len, shapeBuffer);
    doPermuteShapeBuffer(copy,rearrange);
    return copy;
}

/**
 * In-place permutation of the shape and stride sections of a shape-info buffer.
 * Trivial shapes and identity/invalid rearranges are treated as no-ops; after
 * permuting, the element-wise-stride slot is invalidated (-1) and the order
 * char is recomputed from the permuted strides.
 */
INLINEDEF _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeInfo, const Nd4jLong *rearrange) {

    const int rank = shape::rank(shapeInfo);

    //check whether shape is like {1} or {1,1} or {1,1,1,1,...} - in this case we don't need permute
    if(prodLong(shape::shapeOf(shapeInfo), rank) < 2)
        return;

    // check whether rearrange is like {0,1,2,3,...} - in this case we don't need permute as well
    bool isPermutNecessary = false;
    for(int i = 0; i < rank; ++i)
        if(rearrange[i] != i) {
            isPermutNecessary = true;
            break;
        }

    if(!isPermutNecessary)
        return;

    // check whether rearrange contains correct indexes
    for(int i = 0; i < rank; ++i)
        if(rearrange[i] >= rank || rearrange[i] < 0) {
            printf("shape::doPermuteShapeInfo function failed: rearrange indexes are incorrect !\n");
            return;
        }

    // if everything is ok then perform permute
    auto temp = new Nd4jLong[shape::shapeInfoLength(rank)];
    memcpy(temp, shapeInfo, sizeof(Nd4jLong) * shape::shapeInfoLength(rank));
    for (int i = 0; i < rank; ++i) {
        shapeInfo[i + 1] = temp[rearrange[i] + 1];                 // shape entry
        shapeInfo[i + 1 + rank] = temp[rearrange[i] + 1 + rank];   // stride entry
    }

    shapeInfo[shapeInfoLength(rank) - 2] = -1;   // ews no longer valid
    shapeInfo[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank, shape::shapeOf(shapeInfo),shape::stride(shapeInfo),1);

    delete[] temp;
}

// NOTE(review): int* overload below duplicates the Nd4jLong* overload above
// verbatim except for the rearrange element type — candidate for a template.
INLINEDEF _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeInfo, const int* rearrange) {

    const int rank = shape::rank(shapeInfo);

    //check whether shape is like {1} or {1,1} or {1,1,1,1,...} - in this case we don't need permute
    if(prodLong(shape::shapeOf(shapeInfo), rank) < 2)
        return;

    // check whether rearrange is like {0,1,2,3,...} - in this case we don't need permute as well
    bool isPermutNecessary = false;
    for(int i = 0; i < rank; ++i)
        if(rearrange[i] != i) {
            isPermutNecessary = true;
break;
        }

    if(!isPermutNecessary)
        return;

    // check whether rearrange contains correct indexes
    for(int i = 0; i < rank; ++i)
        if(rearrange[i] >= rank || rearrange[i] < 0) {
            printf("shape::doPermuteShapeInfo function failed: rearrange indexes are incorrect !\n");
            return;
        }

    // if everything is ok then perform permute
    auto temp = new Nd4jLong[shape::shapeInfoLength(rank)];
    memcpy(temp, shapeInfo, sizeof(Nd4jLong) * shape::shapeInfoLength(rank));
    for (int i = 0; i < rank; ++i) {
        shapeInfo[i + 1] = temp[rearrange[i] + 1];                 // shape entry
        shapeInfo[i + 1 + rank] = temp[rearrange[i] + 1 + rank];   // stride entry
    }

    shapeInfo[shapeInfoLength(rank) - 2] = -1;   // ews no longer valid
    shapeInfo[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank, shape::shapeOf(shapeInfo),shape::stride(shapeInfo),1);

    delete[] temp;
}

// Permutes a full shape-info in place by swapping its shape and stride
// sections separately, then refreshing the ews/order trailer slots.
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer,int *rearrange) {
    //no swapping needs to happen
    if(shape::isScalar(shapeBuffer)) {
        return;
    }

    Nd4jLong *shapeRef = shapeBuffer;
    //rank of the rearrange array == rank of shape buffer
    int rearrageRank = shape::rank(shapeRef);
    Nd4jLong *shape = shape::shapeOf(shapeRef);
    Nd4jLong *stride = shape::stride(shapeRef);

    shape::doPermuteSwap(rearrageRank,&shape,rearrange);
    shape::doPermuteSwap(rearrageRank,&stride,rearrange);

    shapeRef[shapeInfoLength(rearrageRank) - 2] = -1;
    shapeRef[shape::shapeInfoLength(rearrageRank) - 1] = shape::getOrder(rearrageRank,shape,stride,1);

    // doPermuteShapeInfo(shapeBuffer, rearrange);         // possible fix of integer overflow issue when strides are too large
}

/*
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer) {

    auto shapeRef = shapeBuffer;
    //rank of the rearrange array == rank of shape buffer
    int rearrageRank = shape::rank(shapeRef);
    auto shape = shape::shapeOf(shapeRef);
    auto stride = shape::stride(shapeRef);

    shape::copyOf(rearrageRank,rearrange, tmpBuffer);
    shape::doPermuteSwap(rearrageRank,&shape, tmpBuffer);

    shape::copyOf(rearrageRank,rearrange, tmpBuffer);
shape::doPermuteSwap(rearrageRank,&stride,tmpBuffer);

    shapeRef[shapeInfoLength(rearrageRank) - 2] = -1;
    shapeRef[shape::shapeInfoLength(rearrageRank) - 1] = shape::getOrder(rearrageRank,shape,stride,1);
}
*/

// Rank-explicit permute: permutes shape and stride sections through temporary
// copies of `rearrange` (doPermuteSwap may consume its index array).
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(int rank,Nd4jLong *shapeBuffer, int *rearrange) {
    Nd4jLong *shapeRef = shapeBuffer;
    //rank of the rearrange array == rank of shape buffer
    int rearrageRank = rank;
    Nd4jLong *shape = shape::shapeOf(shapeRef);
    Nd4jLong *stride = shape::stride(shapeRef);

    auto rearrangeCopy1 = shape::copyOf(rearrageRank, rearrange);
    shape::doPermuteSwap(rearrageRank,&shape,rearrangeCopy1);
    delete[] rearrangeCopy1;

    auto rearrangeCopy2 = shape::copyOf(rearrageRank,rearrange);
    shape::doPermuteSwap(rearrageRank, &stride, rearrangeCopy2);
    shapeBuffer[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank,shape,stride,1);
    shapeBuffer[shape::shapeInfoLength(rank) - 2] = -1;
    delete[] rearrangeCopy2;
}

// Variant writing the permuted shape-info into tmpBuffer (when distinct) and
// permuting that buffer's sections in place.
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(int rank, Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer) {
    Nd4jLong *shapeRef = shapeBuffer;
    //rank of the rearrange array == rank of shape buffer
    int rearrageRank = rank;
    auto shape = shape::shapeOf(shapeRef);
    auto stride = shape::stride(shapeRef);
    if(shapeBuffer != tmpBuffer)
        shape::copyOf(rearrageRank,shapeBuffer, tmpBuffer);

    shape::doPermuteSwap(rearrageRank,&shape,rearrange);
    shape::doPermuteSwap(rearrageRank,&stride,rearrange);
    shapeRef[shapeInfoLength(rank) - 2] = -1;
    shapeRef[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank,shape,stride,1);
}

// Builds the index permutation that moves the reduced axes (`dimension`) to
// the back: [dimensionLength..rank) first, then [0..delta). Caller owns result.
INLINEDEF _CUDA_HD Nd4jLong *createPermuteIndexes(int originalRank, int *dimension,int dimensionLength) {
    int delta = originalRank - dimensionLength;

    traceNew(17);

    Nd4jLong *ret = new Nd4jLong[originalRank];
    for(int i = 0; i < delta; i++) {
        ret[i] = i + dimensionLength;
    }

    for(int i = delta; i < originalRank; i++) {
        ret[i] = i - delta;
    }

    return ret;
}

/**
* Get the ordering for the device
* @param length
* @param shape
* @param stride
*
@param elementStride * @return */ INLINEDEF _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride) { int sd = -1; int dim = -1; int i = -1; int cContiguous = 1; int isFortran = 1; sd = 1; for (i = length - 1; i >= 0; --i) { dim = shape[i]; if (stride[i] != sd) { cContiguous = 0; break; } /* contiguous, if it got this far */ if (dim == 0) { break; } sd *= dim; } /* check if fortran contiguous */ sd = elementStride; for (i = 0; i < length; ++i) { dim = shape[i]; if (stride[i] != sd) { isFortran = 0; } if (dim == 0) { break; } sd *= dim; } if (isFortran && cContiguous) return 'a'; else if (isFortran && !cContiguous) return 'f'; else if (!isFortran && !cContiguous) return 'c'; else return 'c'; } /** * Ensure that every value in the re arrange * array is unique * @param arr * @param shape * @param arrLength * @param shapeLength * @return */ template <typename T> INLINEDEF _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength) { if (arrLength != shapeLength) return -1; for (int i = 0; i < arrLength; i++) { if (arr[i] >= arrLength || arr[i] < 0) return -1; } for (int i = 0; i < arrLength; i++) { for (int j = 0; j < arrLength; j++) { if (i != j && arr[i] == arr[j]) return -1; } } return 1; } INLINEDEF _CUDA_HD void traceNew(int id) { //printf("new happened: [%i]\n", id); #ifndef __CUDACC__ //fflush(stdout); #endif } /** * Permute the shape information * @param info the shape information to permute * @param rearrange the order to re arrange * @param rank the rank of the rearrange array */ INLINEDEF _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank) { ShapeInformation *infoDeref = *info; checkArrangeArray(rearrange, rank, rank); shape::doPermuteSwap(rank, &infoDeref->shape, rearrange); shape::doPermuteSwap(rank, &infoDeref->stride, rearrange); char order = getOrder(rank, infoDeref->shape, infoDeref->stride, infoDeref->elementWiseStride); infoDeref->order = order; } /** * Returns whether the * given shape 
is a vector or not * @param shape the shape of the array * @param rank the rank of the shape */ INLINEDEF _CUDA_HD int isVector(Nd4jLong *shape, int rank) { if (rank == 0) return 0; if (rank == 1) return 1; if (rank > 2) return 0; else if (rank <= 2) { if (shape[0] == 1 || shape[1] == 1) return 1; } return 0; } INLINEDEF _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim) { int numOfNonUnity = 0; for(int i = 1; i <= shapeInfo[0]; ++i) { if(shapeInfo[i] != 1) { ++numOfNonUnity; posOfNonUnityDim = i-1; } } return numOfNonUnity == 1 && shapeInfo[0] > 2; } INLINEDEF _CUDA_H Nd4jLong* detachShape(Nd4jLong *originalShape) { Nd4jLong *newShape = new Nd4jLong[shape::shapeInfoLength(originalShape)]; memcpy(newShape, originalShape, shape::shapeInfoByteLength(originalShape)); return newShape; } INLINEDEF _CUDA_H Nd4jLong* copyShape(Nd4jLong *originalShape) { Nd4jLong *newShape = new Nd4jLong[shape::shapeInfoLength(originalShape)]; memcpy(newShape, originalShape, shape::shapeInfoByteLength(originalShape)); return newShape; } INLINEDEF _CUDA_HD int isVector(Nd4jLong *shapeInfo) { return isVector(shape::shapeOf(shapeInfo),shape::rank(shapeInfo)); } INLINEDEF _CUDA_HD bool isRowVector(Nd4jLong *shapeInfo) { bool isVector = shape::isVector(shapeInfo) == 1; bool shapeFirstOne = shape::shapeOf(shapeInfo)[0] == 1; return isVector && shapeFirstOne; } INLINEDEF _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo) { bool isVector = shape::isVector(shapeInfo) == 1; bool shapeFirstOne = shape::shapeOf(shapeInfo)[0] == 1; return isVector && !shapeFirstOne; } INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank) { for(int i = 0; i < rank; i++) { if(shape[i] == shape::prod(shape,rank)) return 1; } return 0; } INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo) { return oneDimEqualToLength(shape::shapeOf(shapeInfo),shape::rank(shapeInfo)); } /** * Returns whether the * given shape is a vector or not * @param shape the shape of the array * @param 
rank the rank of the shape */ INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shape, int rank) { if (rank > 2) return 0; else if (rank <= 2) { if (shape[0] == 1 || shape[1] == 1) return 0; } return 1; } INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shapeInfo) { return isMatrix(shape::shapeOf(shapeInfo),shape::rank(shapeInfo)); } /** * Returns the shape portion of an information * buffer */ INLINEDEF _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer) { return buffer + 1; } /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ template <typename T> INLINEDEF _CUDA_HD T *copyOf(Nd4jLong length, T *toCopy) { traceNew(18); T *ret = new T[length]; return copyOf(length, toCopy, ret); } template <typename T> INLINEDEF _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret) { memcpy(ret, toCopy, sizeof(T)*length); return ret; } /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ template <typename T> INLINEDEF _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to) { memcpy(to, from, sizeof(T)*length); } /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. 
*/ INLINEDEF _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes) { for(int i = 0; i < length; i++) { to[i] = from[indexes[i]]; } } /** * Permute the given strides * in the given rearrange order * @param toPermute the buffer to permute * @param shapeRank the length of the buffer to permute * @param rearrange the rearrange order (must be 0 based indexes * and all must be filled in) * @return the rearranged array */ /* INLINEDEF _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, int *rearrange) { Nd4jLong *strideCopy = copyOf(shapeRank, toPermute); checkArrangeArray(rearrange, shapeRank, shapeRank); Nd4jLong *newStride = doPermuteSwap(shapeRank, strideCopy, rearrange); delete[] strideCopy; return newStride; } */ /** * Return the slice (shape + 1 in pointer arithmetic) * @param shape the shape to take the slice of * @return the shape array - the first entry */ INLINEDEF _CUDA_HD Nd4jLong *slice(Nd4jLong *shape) { return shape + 1; } INLINEDEF _CUDA_HD int slices(Nd4jLong *shapeBuffer) { return static_cast<int>(shape::shapeOf(shapeBuffer)[0]); } INLINEDEF _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer) { int rank = shape::rank(shapeBuffer); int newRank = rank - 1; if(newRank < 2) newRank = 2; Nd4jLong *newShapeBuffer = new Nd4jLong[shape::shapeInfoLength(newRank)]; newShapeBuffer[0] = newRank; Nd4jLong *currShape = shape::shapeOf(shapeBuffer); Nd4jLong *currStride = shape::stride(shapeBuffer); //initialize new shape and stride by taking the shape and stride + 1 //and adding to the shape information //a slice is always just taking the existing shape and cutting the first index off //of the shape and stride Nd4jLong *newShape = shape::shapeOf(newShapeBuffer); Nd4jLong *newStride = shape::stride(newShapeBuffer); if(shape::isVector(shapeBuffer)) { Nd4jLong *currShape = shape::shapeOf(shapeBuffer); //row vector: slice index 0 is a valid index, just copy the whole thing if(currShape[0] == 1) { 
if(sliceIdx == 0) { memcpy(newShapeBuffer,shapeBuffer,shape::shapeInfoByteLength(shape::rank(shapeBuffer))); return newShapeBuffer; } } //column vector: this will be a scalar else { delete[] newShapeBuffer; Nd4jLong *scalar = shape::createScalarShapeInfo(); int offset = shape::offset(shapeBuffer); scalar[shape::shapeInfoLength(2) - 3] = offset + sliceIdx; return scalar; } } else if(shape::isMatrix(shapeBuffer)) { newShape[0] = 1; newShape[1] = currShape[1]; newStride[0] = 1; newStride[1] = currStride[1]; } else { for(int i = 0; i < newRank; i++) { newShape[i] = currShape[i + 1]; newStride[i] = currStride[i + 1]; } } auto indices = new Nd4jLong[rank]; memset((void *) indices,0,rank * sizeof(Nd4jLong)); indices[0] = sliceIdx; Nd4jLong offset = shape::getOffset(0,newShape,newStride,indices,rank); newShapeBuffer[shape::shapeInfoLength(newRank) - 3] = offset; if(shape::isMatrix(shapeBuffer)) { newShapeBuffer[shape::shapeInfoLength(newRank) - 2] = currStride[1]; } else { newShapeBuffer[shape::shapeInfoLength(newRank) - 2] = shape::elementWiseStride(shapeBuffer); } newShapeBuffer[shape::shapeInfoLength(newRank) - 1] = shape::getOrder(newRank,newShape,newStride,1); delete[] indices; return newShapeBuffer; } /** * Returns the length of the * shape information buffer: * rank * 2 + 3 * @param rank the rank to get the shape * info length for * @return rank * 2 + 4 */ INLINEDEF _CUDA_HD int shapeInfoLength(int rank) { //FIXME magic numbers return rank * 2 + 4; } INLINEDEF _CUDA_HD int shapeInfoLength(Nd4jLong* shape) { return shapeInfoLength(shape[0]); } INLINEDEF _CUDA_HD size_t shapeInfoByteLength(int rank) { //FIXME magic numbers return (rank * 2 + 4) * sizeof(Nd4jLong); } INLINEDEF _CUDA_HD size_t shapeInfoByteLength(Nd4jLong* shapeInfo) { //FIXME magic numbers return shapeInfoByteLength((int) shapeInfo[0]); } /** * Returns the rank portion of * an information buffer */ INLINEDEF _CUDA_HD int rank( Nd4jLong *buffer) { return static_cast<int>(buffer[0]); } /** * Converts a 
raw int buffer of the layout: * rank * shape * stride * offset * elementWiseStride * * where shape and stride are both straight int pointers */ INLINEDEF _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer) { traceNew(19); auto info = new ShapeInformation; auto length = shapeInfoLength(rank(buffer)); auto rank = buffer[0]; //start after rank info->shape = buffer + 1; info->stride = buffer + (1 + rank); info->rank = rank; info->offset = buffer[length - 3]; info->elementWiseStride = buffer[length - 2]; Nd4jLong *stride = buffer + 1 + rank; info->stride = stride; info->order = (char) buffer[length - 1]; return info; } /** * Returns the stride portion of an information * buffer */ INLINEDEF _CUDA_HD Nd4jLong *stride( Nd4jLong *buffer) { return buffer + (1 + rank(buffer)); } INLINEDEF _CUDA_HD bool isEmpty(Nd4jLong *shapeInfo) { return ((shape::extra(shapeInfo) & ARRAY_EMPTY) == ARRAY_EMPTY); } /** * Compute the length of the given shape */ INLINEDEF _CUDA_HD Nd4jLong length(Nd4jLong *shapeInfo) { int rank = shape::rank(shapeInfo); if (rank == 0) { if (isEmpty(shapeInfo)) return 0L; else return 1L; } if (rank == 1) return shapeInfo[1]; return shape::prodLong(shape::shapeOf(shapeInfo), rank); } INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape) { Nd4jLong ret = 1; for (auto v : shape) { ret *= v; } return ret; } INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape) { Nd4jLong ret = 1; for (auto v : shape) { ret *= v; } return ret; } /*** * Returns the offset * portion of an information buffer */ INLINEDEF _CUDA_HD Nd4jLong offset(Nd4jLong *buffer) { return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3]; } INLINEDEF _CUDA_HD Nd4jLong& extra(Nd4jLong *buffer) { return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3]; } /** * Returns the ordering * for this shape information buffer */ INLINEDEF _CUDA_HD char order(Nd4jLong *buffer) { //FIXME magic numbers return static_cast<char>(buffer[(buffer[0] * 2 + 4) - 
1]); } /** * Returns the element wise stride for this information * buffer */ INLINEDEF _CUDA_HD Nd4jLong elementWiseStride(Nd4jLong *buffer) { return buffer[shapeInfoLength(buffer[0]) - 2]; } /** * Returns the element wise stride for this information * buffer relative to a dimension and reduction index */ INLINEDEF _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong* buffer, int* dimension, int dimensionLength) { if(dimensionLength > 1) { if(shape::order(buffer) == 'f') { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. */ if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) { //int tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]]; //return tadElementWiseStride; auto tadElementWiseStride = shape::stride(buffer)[dimension[0]]; return tadElementWiseStride; } return 1; } else { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. */ if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) { auto tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]]; return tadElementWiseStride; } return 1; } } else { if(shape::order(buffer) == 'f') { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. 
*/ auto tadElementWiseStride = shape::stride(buffer)[dimension[0]]; return tadElementWiseStride; } else { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. */ auto tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]]; return tadElementWiseStride; } } } /** * Returns whether * the given shape info buffer * represents a scalar shape */ INLINEDEF _CUDA_HD int isScalar(Nd4jLong *info) { const int rank = shape::rank(info); if(rank > 2) return 0; if(rank == 0) return 1; if(rank == 1) return shape::shapeOf(info)[0] == 1; if(rank == 2) return shape::shapeOf(info)[0] == 1 && shape::shapeOf(info)[1] == 1; return 0; } /** * Returns whether * the given shape information * represents a scalar * shape or not */ INLINEDEF _CUDA_HD int isScalar(volatile ShapeInformation *info) { const int rank = info->rank; if(rank > 2) return 0; if(rank == 1) return info->shape[0] == 1; if(rank == 2) return info->shape[0] == 1 && info->shape[1] == 1; return 0; } /** * Return a copy of this array with the * given index omitted * * @param data the data to copy * @param indexes the index of the item to remove * @param dataLength the length of the data array * @param indexesLength the length of the data array * @return the new array with the omitted * * item */ template <typename T1, typename T2> INLINEDEF _CUDA_HD void removeIndex(T1* data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *ret) { int count = 0; int absLength = dataLength - indexesLength; for (int i = 0; i < dataLength && count < absLength; i++) { int contains = 0; for (int j = 0; j < indexesLength; j++) { if (i == indexes[j]) { contains = 1; break; } } if (!contains) { ret[count] = data[i]; count++; } } } /** * Return a copy of this array with 
the * given index omitted * * @param data the data to copy * @param indexes the index of the item to remove * @param dataLength the length of the data array * @param indexesLength the length of the data array * @return the new array with the omitted * * item */ template <typename T1, typename T2> INLINEDEF _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength) { auto lengthOfArr = dataLength - indexesLength; if(lengthOfArr < 0) { printf("Remove index call created a <= 0 length array. This was likely not intended."); } auto ret = new T1[lengthOfArr]; memset(ret,0,sizeof(T1) * lengthOfArr); removeIndex<T1, T2>(data, indexes, dataLength, indexesLength, ret); return ret; } INLINEDEF _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes,int indexesLength,int begin,int end) { int len = end - indexesLength; traceNew(20); auto ret = new Nd4jLong[len]; int retIdx = 0; //not here that we do 0 based indexing for end - this assumes things like: //0 to 4 are specified for(int i = begin; i < end ; i++) { bool found = false; for(int j = 0; j < indexesLength; j++) { if(indexes[j] == i) { found = true; break; } } if(!found) { ret[retIdx++] = i; } } return ret; } /** * Computes the offset for accessing * a global element given the shape information * and the offset to be read. */ #ifdef __CUDACC__ INLINEDEF __device__ int tadOffset(ShapeInformation *xInfo, int offset) { return offset + threadIdx.x * xInfo->elementWiseStride; } #endif /** * Returns a shape * forces the given length to be 2. * @param shape the shape to modify * @param dimension the dimension (row or column) * for the shape to be returned as * @return the new shape */ INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape, int dimension) { traceNew(21); Nd4jLong *ret = new Nd4jLong[2]; if (dimension == 0) { ret[0] = 1; ret[1] = shape[0]; } else { ret[0] = shape[0]; ret[1] = 1; } return ret; } /** * Returns a shape * forces the given length to be 2. 
* @param shape the shape to modify * @param dimension the dimension (row or column) * for the shape to be returned as * @return the new shape */ INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape) { return ensureVectorShape(shape, 0); } /** * This method does STRICT comparison for two shape buffers * * @param shape * @return */ INLINEDEF _CUDA_HD bool equalsStrict(Nd4jLong *shapeA, Nd4jLong *shapeB) { if (shapeA[0] != shapeB[0]) return false; if (shapeA[0] == 0) return true; // we do full comparison here int length = shape::shapeInfoLength(shapeA[0]); for (int e = 1; e < length; e++) if (shapeA[e] != shapeB[e]) return false; return true; } INLINEDEF _CUDA_HD int sizeAt(Nd4jLong *shape, int dim) { if (dim >= 0) return shape[1+dim]; else return shape[1+(rank(shape) + dim)]; } /** * This method does SOFT comparison for two shape buffers, we compare only rank & shapes * * @param shape * @return */ INLINEDEF _CUDA_HD bool equalsSoft(Nd4jLong *shapeA, Nd4jLong *shapeB) { if (shapeA[0] != shapeB[0]) return false; if (shapeA[0] == 0) return true; // we compare only shapes, and ignoring stride & ews auto length = shapeA[0]; for (int e = 1; e <= length; e++) if (shapeA[e] != shapeB[e]) return false; return true; } /** * Generate an int buffer * up to the given length * at the specified increment * */ template <typename T> INLINEDEF _CUDA_HD T* range(int from, int to, int increment) { int diff = nd4j::math::nd4j_abs<int>(from - to); int retLength = diff / increment; T *ret; traceNew(22); if(diff / increment < 1) ret = new T[1]; else ret = new T[diff / increment]; if (from < to) { int count = 0; for (int i = from; i < to; i += increment) { if (count >= retLength) break; ret[count++] = i; } } else if (from > to) { int count = 0; for (int i = from - 1; i >= to; i -= increment) { if (count >= retLength) break; ret[count++] = i; } } return ret; } /** * Generate a range * beginning at from and ending at to * incrementing by 1 * @param from the start * @param to the end 
* @return the int array starting at from and ending at to */ template <typename T> INLINEDEF _CUDA_HD T* range(int from, int to) { return range<T>(from, to, 1); } /** * Keep the given indexes in the data * @param data * @param index * @param indexLength * @param dataLength * @return */ INLINEDEF _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength) { traceNew(23); Nd4jLong *ret = new Nd4jLong[indexLength]; int count = 0; for (int i = 0; i < dataLength; i++) { int contains = 0; for (int j = 0; j < indexLength; j++) { if (i == index[j]) { contains = 1; break; } } if (contains) ret[count++] = data[i]; } return ret; } /** * Generate a reverse * copy of the data */ template <typename T> INLINEDEF _CUDA_HD T* reverseCopy(T *data, Nd4jLong length) { if (length < 1) return nullptr; traceNew(24); T *copy = new T[length]; for (Nd4jLong i = 0; i <= length / 2; i++) { T temp = data[i]; copy[i] = data[length - i - 1]; copy[length - i - 1] = temp; } return copy; } template <typename T> INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length) { if (length < 1) return; for (Nd4jLong i = 0; i <= length / 2; i++) { T temp = from[i]; to[i] = from[length - i - 1]; to[length - i - 1] = temp; } } template <typename T> INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length) { if (length < 1) return; for (Nd4jLong i = 0; i <= length / 2; i++) { T temp = from[indexes[i]]; to[i] = from[indexes[length - i - 1]]; to[length - i - 1] = temp; } } /** * * @param arr1 * @param arr1Length * @param arr2 * @param arr2Length * @return */ template <typename T> INLINEDEF _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length) { traceNew(25); T *ret = new T[arr1Length + arr2Length]; std::memcpy(ret, arr1, arr1Length * sizeof(T)); std::memcpy(ret + arr1Length, arr2, arr2Length * sizeof(T)); return ret; } /** * * @param numArrays * @param numTotalElements * @param arr * @param lengths * @return */ 
template <typename T> INLINEDEF _CUDA_HD T *concat(Nd4jLong numArrays, Nd4jLong numTotalElements, T **arr, Nd4jLong *lengths) { T* ret = new T[numTotalElements]; Nd4jLong count = 0; for (Nd4jLong i = 0; i < numArrays; i++) { for (Nd4jLong j = 0; j < lengths[i]; j++) { ret[count++] = arr[i][j]; } } return ret; } /** * Get the length per slice of the * given shape and the dimension * @param rank the rank of the shape * @param shape the shape of to get * the length per slice for * @param dimension the dimension to * get the length per slice for * @param dimensionLength the length of the dimension array * @return the length per slice of the given shape * along the given dimension */ INLINEDEF _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int* dimension, int dimensionLength) { if(shape::isVector(shape,rank)) { //return total length for row vectors if(dimensionLength == 1 && shape[0] == 1) { return shape::prod(shape,rank); } } else if(rank == dimensionLength) return shape::prod(shape,rank); int absSelta = nd4j::math::nd4j_abs<int>(rank - dimensionLength); traceNew(27); auto ret2 = shape::removeIndex<Nd4jLong>(shape, dimension, rank, dimensionLength); auto ret = prodLong(ret2, absSelta); delete[] ret2; return ret; } /** * calculates the offset for a tensor * @param index * @param arr * @param tensorShape * @return */ INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int rank, int index, Nd4jLong *shape, Nd4jLong *tensorShape, int tensorShapeLength, int* dimension, int dimensionLength) { auto tensorLength = prodLong(tensorShape, tensorShapeLength); auto lengthPerSlice2 = lengthPerSlice(rank, shape, dimension, dimensionLength); if (lengthPerSlice2 <= 0) { return 0; } Nd4jLong offset = index * tensorLength / lengthPerSlice2; return offset; } /** * calculates the offset for a tensor * @param index * @param arr * @param tensorShape * @return */ INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2) { Nd4jLong offset = 
index * tensorLength / lengthPerSlice2; return offset; } #ifdef __CUDACC__ /** * Computes the offset for accessing * a global element given the shape information * and the offset to be read. */ INLINEDEF _CUDA_D int tadOffset(Nd4jLong *xInfo, int offset) { return offset + threadIdx.x * elementWiseStride(xInfo); } #endif /** * Computes the number * of tensors along * a given dimension */ INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(volatile int rank, volatile int length, volatile Nd4jLong *shape, int *dimension, int dimensionLength) { Nd4jLong *tensorShape = shape::keep(shape, dimension, dimensionLength, rank); Nd4jLong ret = length / shape::prodLong(tensorShape, dimensionLength); delete[] tensorShape; return ret; } /** * Computes the number * of tensors along * a given dimension */ INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) { Nd4jLong *keepShape = shape::shapeOf(shapeInfo); Nd4jLong *tensorShape = shape::keep(keepShape, dimension, dimensionLength, rank(shapeInfo)); Nd4jLong ret = shape::length(shapeInfo) / shape::prodLong(tensorShape, dimensionLength); delete[] tensorShape; return ret; } /** * Get an offset for retrieval * from a data buffer * based on the given * shape stride and given indices * @param baseOffset the offset to start from * @param shape the shape of the array * @param stride the stride of the array * @param indices the indices to iterate over * @return the double at the specified index */ INLINEDEF _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, Nd4jLong *shape, Nd4jLong *stride, Nd4jLong *indices, int rank) { Nd4jLong offset = baseOffset; for(int i = 0; i < rank; i++) { if(indices[i] >= shape[i] && shape[i] != 1) { #ifdef __CUDA_ARCH__ printf("D: Index %i [%lld] must not be >= shape[%lld].\n", i,indices[i],shape[i]); #else printf("H: Index %i [%lld] must not be >= shape[%lld].\n", i, (long long) indices[i], (long long) shape[i]); #endif #ifdef __CUDA_ARCH__ if (threadIdx.x == 0 
&& blockIdx.x == 0) printShapeInfoLinear("getOffsetFailed", rank, shape, stride); #endif return -1; } if(shape[i] != 1) { offset += indices[i] * stride[i]; } } return offset; } /** * Returns the tensor along dimension * for the given block index * @param blockSize * @param blockIdx * @param i * @return */ INLINEDEF _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i) { return blockIdx + i * blockSize; } /** * Computes the number of tads per block * */ INLINEDEF _CUDA_HD int tadsPerBlock(int blockSize, int tads) { return (int) nd4j::math::nd4j_ceil<double>(tads / (double) blockSize); } /** * Returns a shape buffer * for the shape information metadata. */ INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info) { traceNew(29); auto ret = new Nd4jLong[shapeInfoLength(info->rank)]; int count = 1; int rank = info->rank; ret[0] = info->rank; for (int i = 0; i < rank; i++) { ret[count++] = info->shape[i]; } for (int i = 0; i < rank; i++) { ret[count++] = info->stride[i]; } ret[count++] = info->offset; ret[count++] = info->elementWiseStride; ret[count] = info->order; return ret; } INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret) { int count = 1; int rank = info->rank; ret[0] = info->rank; if (ret[0] == 0) { ret[1] = 0; ret[2] = 1; ret[3] = 99; return ret; } for (int i = 0; i < rank; i++) { ret[count++] = info->shape[i]; } for (int i = 0; i < rank; i++) { ret[count++] = info->stride[i]; } ret[count++] = info->offset; ret[count++] = info->elementWiseStride; ret[count++] = info->order; return ret; } INLINEDEF _CUDA_HD void printIntArray(Nd4jLong *arr,int length) { for(int i = 0; i < length; i++) { printf(" %lld ", (long long) arr[i]); } printf("\n"); } INLINEDEF _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo) { int rank = shape::rank(shapeInfo); Nd4jLong *shape = shape::shapeOf(shapeInfo); printf("Rank %d\n",rank); printf("Shape:\n"); for(int i = 0; i < rank; i++) { printf(" %lld ",(long long) shape[i]); } 
printf("\n"); Nd4jLong *stride = shape::stride(shapeInfo); printf("Stride:\n"); for(int i = 0; i < rank; i++) { printf(" %lld ", (long long) stride[i]); } printf("\n"); printf("Order %c\n",shape::order(shapeInfo)); } INLINEDEF _CUDA_HD void printShapeInfoLinear(Nd4jLong *shapeInfo) { int rank = shape::rank(shapeInfo); int lim = shape::shapeInfoLength(rank); printf("ShapeInfo: ["); for (int i = 0; i < lim; i++) { printf("%lld", (long long) shapeInfo[i]); if (i < lim - 1) { printf(", "); } } printf("]\n"); #ifndef __CUDA_ARCH__ fflush(stdout); #endif } INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, int rank, Nd4jLong *shape, Nd4jLong *strides) { printf("%s : [", msg); for (int i = 0; i < rank; i++) { printf("%lld, ", (long long) shape[i]); } for (int i = 0; i < rank; i++) { printf("%lld", (long long) strides[i]); if (i < rank - 1) printf(", "); } printf("]\n"); #ifndef __CUDA_ARCH__ fflush(stdout); #endif } INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, Nd4jLong *shapeInfo) { int rank = shape::rank(shapeInfo); int lim = shape::shapeInfoLength(rank); printf("%s : [", msg); for (int i = 0; i < lim; i++) { printf("%lld", (long long) shapeInfo[i]); if (i < lim - 1) { printf(", "); } } printf("]\n"); #ifndef __CUDACC__ fflush(stdout); #endif } INLINEDEF _CUDA_HD void printArray(float *arr,int length) { printf("Array: ["); for (int i = 0; i < length; i ++) { printf("%f", arr[i]); if (i + 1 < length) printf(", "); } printf("]\n"); } /** * Given an linear index, element wise stride * and the length of each tad * map a linear index to a tad * @param i the index to map * @param the element wise stride for the tads * @param numElementsPerTad the number of elements * per tad */ INLINEDEF _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad) { return i / (numElementsPerTad * elementWiseStride); } /** * Map a tad to a * reduction index. 
* @param tadIndexForOriginal the original tad index for the * split up problem (eg: split is dimension 3 mapping to a 2,3 problem) * @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3) * @param tadsForOriginal the number of tads for the smaller problem (eg: 3) */ INLINEDEF _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced, int tadsForOriginal) { if (tadIndexForOriginal == 0) return 0; return tadIndexForOriginal / (tadsForOriginal / tadsForReduced); } INLINEDEF _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer) { int rank = shape::rank(shapeBuffer); Nd4jLong *shape = shape::shapeOf(shapeBuffer); Nd4jLong *strides = shape::stride(shapeBuffer); // swap shape for (int e = 0; e < rank / 2; e++) { int idx1 = rank - e - 1; int idx2 = e; int tmp = shape[idx2]; shape[idx2] = shape[idx1]; shape[idx1] = tmp; } // swap strides for (int e = 0; e < rank / 2; e++) { int idx1 = rank - e - 1; int idx2 = e; int tmp = strides[idx2]; strides[idx2] = strides[idx1]; strides[idx1] = tmp; } if (shape::order(shapeBuffer) == 'c') shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 102; else shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 99; } /** * Tad index for linear * @param linearIndex * @param tadLength * @return */ INLINEDEF _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength) { return linearIndex % tadLength; } /** * Computes the number of tads * per reduce index for the * reduction tad. 
*/ INLINEDEF _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal) { return tadsForOriginal / tadsForReduce; } /** * Maps a linear index to a reduction index * @param i the linear index to map * @param elementWiseStride the element wise stride * for the multiple problem * @param tadNum the number of tads for the shrunken problem * @param originalTadNum the tad number for the reduced version of the problem */ INLINEDEF _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad, int tadNum, int originalTadNum) { int tad = tadIndex(i, elementWiseStride, numElementsPerTad); return reductionIndexForTad(tad, tadNum, originalTadNum); } INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo() { traceNew(30); auto shape = new Nd4jLong[1]; shape[0] = 1; auto stride = new Nd4jLong[1]; stride[0] = 1; auto shapeInformation2 = new ShapeInformation(); shapeInformation2->rank = 1; shapeInformation2->offset = 0; shapeInformation2->stride = stride; shapeInformation2->shape = shape; shapeInformation2->elementWiseStride = 1; shapeInformation2->order = 99; Nd4jLong *ret = shape::toShapeBuffer(shapeInformation2); delete shapeInformation2; delete[] shape; delete[] stride; return ret; } INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret) { ret[0] = 2; ret[1] = 1; ret[2] = 1; ret[3] = 1; ret[4] = 1; ret[5] = 0; ret[6] = 1; ret[7] = 99; return ret; } /** * Returns the prod of the data * up to the given length */ INLINEDEF _CUDA_HD int prod(Nd4jLong *data, int length) { int prod = 1; for (int i = 0; i < length; i++) { prod *= data[i]; } return prod; } /** * Returns the prod of the data * up to the given length */ INLINEDEF _CUDA_HD Nd4jLong prodLong( Nd4jLong *data, int length) { Nd4jLong prod = 1; for (int i = 0; i < length; i++) { prod *= data[i]; } return prod; } INLINEDEF _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data, Nd4jLong *dimension,int dimensionLength) { Nd4jLong *stride = shape::stride(data); //corner case: return the 
final item when its greater than the max, since its guaranteed to be left over //note here that strides are interpreted in reverse for tad //start from the front rather than the back int rank = shape::rank(data); if(shape::order(data) == 'f') { int dimIdx = dimensionLength - 1; for(int i = rank - 1; i >= 0; i--) { /** * Needs to find an algorithm such that: * looping backwards will find the highest dimension left * that isn't included in the dimension index list. * * This can also be thought of as the last item of the first index * of the difference between the full list of indices and * the dimension indices. * * We should avoid excessive object creation by only looping backwards. */ if(dimension[dimIdx--] != i) { int ret = stride[i]; return ret; } } } else { int dimIdx = dimensionLength - 1; for(int i = rank - 1; i >= 0; i--) { /** * Needs to find an algorithm such that: * looping backwards will find the highest dimension left * that isn't included in the dimension index list. * * This can also be thought of as the last item of the first index * of the difference between the full list of indices and * the dimension indices. * * We should avoid excessive object creation by only looping backwards. 
*/ if(dimension[dimIdx--] != i) { int ret = stride[i]; return ret; } } } int ret = stride[0]; return ret; } #ifdef __CUDACC__ __device__ INLINEDEF void sweepShapeInfoBuffer(Nd4jLong *shapeInfoBuffer, Nd4jLong *targetBuffer) { // we read first element, to find out length of our shapeInfoBuffer int rank = shapeInfoBuffer[0]; int len = shape::shapeInfoLength(rank); for (int i = threadIdx.x; i < len; i += blockDim.x) targetBuffer[i] = shapeInfoBuffer[i]; } #endif INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr) { return shape::shapeBufferOfNpy(arr.shape.size(),(unsigned int*) arr.shape.data(),arr.fortranOrder); } // INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer) { // unsigned Nd4jLong *shape; // unsigned int ndims, wordSize; // bool fortranOrder; // cnpy::parseNpyHeaderStr(std::string(buffer),wordSize,shape,ndims,fortranOrder); // Nd4jLong * ret = shape::shapeBufferOfNpy(ndims,shape,fortranOrder); // delete[] shape; // return ret; // } INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(int rank, unsigned int* shape,bool fortranOrder) { if(fortranOrder) { Nd4jLong *shapeBufferRet = shape::shapeBufferFortran(rank,(Nd4jLong *) shape); return shapeBufferRet; } else { Nd4jLong *newShape = new Nd4jLong[rank]; for(int i = 0; i < rank; i++) { newShape[i] = shape[i]; } Nd4jLong *shapeBufferRet = shape::shapeBuffer(rank,newShape); delete[] newShape; return shapeBufferRet; } } INLINEDEF _CUDA_HD bool strideDescendingCAscendingF(Nd4jLong *shapeBuffer) { int rank = shape::rank(shapeBuffer); Nd4jLong *strides = shape::stride(shapeBuffer); char order = shape::order(shapeBuffer); if (shape::isRowVector(shapeBuffer) && strides[0] == 1 && strides[1] == 1) return true; if (order == 'c') { for (int i = 1; i < rank; i++) if (strides[i-1] <= strides[i]) return false; return true; } else if (order == 'f') { for (int i = 1; i < rank; i++) if (strides[i-1] >= strides[i]) return false; return true; } else { printf("Unknown order for array!\n"); return false; } } 
/**
 * Attempts a no-copy reshape from (oldRank, oldShape) to (newRank, newShapeOf)
 * and, on success, writes the resulting shape buffer into `target`.
 * Returns false when the reshape would require moving data (strides are not
 * compatible) or when the element counts differ. The stride-combination
 * algorithm mirrors numpy's no-copy reshape attempt.
 */
INLINEDEF _CUDA_H bool reshapeCF(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder, Nd4jLong* target) {
    int oldnd;
    Nd4jLong* olddims = shape::copyOf(oldRank, shape::shapeOf(oldShape));
    Nd4jLong* oldstrides = shape::copyOf(oldRank, shape::stride(oldShape));
    int np, op, last_stride;
    int oi, oj, ok, ni, nj, nk;
    Nd4jLong* newStrides = new Nd4jLong[newRank];
    oldnd = 0;
    /*
     * Remove axes with dimension 1 from the old array. They have no effect
     * but would need special cases since their strides do not matter.
     */
    for (oi = 0; oi < oldRank; oi++) {
        if (shape::shapeOf(oldShape)[oi] != 1) {
            olddims[oldnd] = shape::shapeOf(oldShape)[oi];
            oldstrides[oldnd] = shape::stride(oldShape)[oi];
            oldnd++;
        }
    }

    // total element counts must match for any reshape
    np = 1;
    for (ni = 0; ni < newRank; ni++) {
        np *= newShapeOf[ni];
    }
    op = 1;
    for (oi = 0; oi < oldnd; oi++) {
        op *= olddims[oi];
    }
    if (np != op) {
        /* different total sizes; no hope */
        delete[] olddims;
        delete[] oldstrides;
        delete[] newStrides;
        return false;
    }

    if (np == 0) {
        /* the current code does not handle 0-sized arrays, so give up */
        delete[] olddims;
        delete[] oldstrides;
        delete[] newStrides;
        return false;
    }

    /* oi to oj and ni to nj give the axis ranges currently worked with */
    oi = 0;
    oj = 1;
    ni = 0;
    nj = 1;
    while (ni < newRank && oi < oldnd) {
        // grow one of the two axis groups until both cover the same
        // number of elements
        np = newShapeOf[ni];
        op = olddims[oi];

        while (np != op) {
            if (np < op) {
                /* Misses trailing 1s, these are handled later */
                np *= newShapeOf[nj++];
            } else {
                op *= olddims[oj++];
            }
        }

        /* Check whether the original axes can be combined */
        for (ok = oi; ok < oj - 1; ok++) {
            if (isFOrder) {
                if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) {
                    /* not contiguous enough */
                    delete[] olddims;
                    delete[] oldstrides;
                    delete[] newStrides;
                    return false;
                }
            } else {
                /* C order */
                if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) {
                    /* not contiguous enough */
                    delete[] olddims;
                    delete[] oldstrides;
                    delete[] newStrides;
                    return false;
                }
            }
        }

        /* Calculate new strides for all axes currently worked with */
        if (isFOrder) {
            newStrides[ni] = oldstrides[oi];
            for (nk = ni + 1; nk < nj; nk++) {
                newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1];
            }
        } else {
            /* C order */
            newStrides[nj - 1] = oldstrides[oj - 1];
            for (nk = nj - 1; nk > ni; nk--) {
                newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk];
            }
        }
        ni = nj++;
        oi = oj++;
    }

    // fill remaining (trailing size-1) axes with the last computed stride
    if (ni >= 1) {
        last_stride = newStrides[ni - 1];
    } else {
        last_stride = shape::elementWiseStride(oldShape);
    }
    if (isFOrder && ni >= 1) {
        last_stride *= newShapeOf[ni - 1];
    }
    for (nk = ni; nk < newRank; nk++) {
        newStrides[nk] = last_stride;
    }

    // assemble the output shape buffer: rank, shape, strides, then the
    // trailing offset / ews / order slots
    target[0] = newRank;
    int cnt = 1;
    for (int e = 0; e < newRank; e++)
        target[cnt++] = newShapeOf[e];

    for (int e = 0; e < newRank; e++)
        target[cnt++] = newStrides[e];

    target[shape::shapeInfoLength(newRank) - 3] = 0;
    target[shape::shapeInfoLength(newRank) - 2] = -1;
    target[shape::shapeInfoLength(newRank) - 1] = isFOrder ? 102 : 99;

    delete[] olddims;
    delete[] oldstrides;
    delete[] newStrides;

    return true;
}

/**
 * Same stride-compatibility walk as reshapeCF, but only answers whether a
 * no-copy reshape is possible; nothing is written out.
 */
INLINEDEF _CUDA_H bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder) {
    int oldnd;
    Nd4jLong* olddims = shape::copyOf(oldRank, shape::shapeOf(oldShape));
    Nd4jLong* oldstrides = shape::copyOf(oldRank, shape::stride(oldShape));
    int np, op, last_stride;
    int oi, oj, ok, ni, nj, nk;
    auto newStrides = new Nd4jLong[newRank];
    oldnd = 0;
    /*
     * Remove axes with dimension 1 from the old array. They have no effect
     * but would need special cases since their strides do not matter.
     */
    for (oi = 0; oi < oldRank; oi++) {
        if (shape::shapeOf(oldShape)[oi] != 1) {
            olddims[oldnd] = shape::shapeOf(oldShape)[oi];
            oldstrides[oldnd] = shape::stride(oldShape)[oi];
            oldnd++;
        }
    }

    np = 1;
    for (ni = 0; ni < newRank; ni++) {
        np *= newShapeOf[ni];
    }
    op = 1;
    for (oi = 0; oi < oldnd; oi++) {
        op *= olddims[oi];
    }
    if (np != op) {
        /* different total sizes; no hope */
        delete[] olddims;
        delete[] oldstrides;
        delete[] newStrides;
        return false;
    }

    if (np == 0) {
        /* the current code does not handle 0-sized arrays, so give up */
        delete[] olddims;
        delete[] oldstrides;
        delete[] newStrides;
        return false;
    }

    /* oi to oj and ni to nj give the axis ranges currently worked with */
    oi = 0;
    oj = 1;
    ni = 0;
    nj = 1;
    while (ni < newRank && oi < oldnd) {
        np = newShapeOf[ni];
        op = olddims[oi];

        while (np != op) {
            if (np < op) {
                /* Misses trailing 1s, these are handled later */
                np *= newShapeOf[nj++];
            } else {
                op *= olddims[oj++];
            }
        }

        /* Check whether the original axes can be combined */
        for (ok = oi; ok < oj - 1; ok++) {
            if (isFOrder) {
                if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) {
                    /* not contiguous enough */
                    delete[] olddims;
                    delete[] oldstrides;
                    delete[] newStrides;
                    return false;
                }
            } else {
                /* C order */
                if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) {
                    /* not contiguous enough */
                    delete[] olddims;
                    delete[] oldstrides;
                    delete[] newStrides;
                    return false;
                }
            }
        }

        /* Calculate new strides for all axes currently worked with */
        if (isFOrder) {
            newStrides[ni] = oldstrides[oi];
            for (nk = ni + 1; nk < nj; nk++) {
                newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1];
            }
        } else {
            /* C order */
            newStrides[nj - 1] = oldstrides[oj - 1];
            for (nk = nj - 1; nk > ni; nk--) {
                newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk];
            }
        }
        ni = nj++;
        oi = oj++;
    }

    delete[] olddims;
    delete[] oldstrides;
    delete[] newStrides;

    return true;
}

// this function checks the consistence of dimensions with array rank
// (negative dimensions, too large dimensions, too big number of dimensions);
// also it sorts input array of dimensions, this operation is also necessary
// for creating TAD object
INLINEDEF _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions) {
    int dimSize = dimensions.size();
    if (dimSize == 0)
        throw std::runtime_error("shape::checkDimensions method: array of dimensions is empty!");
    // check presence of negative dimensions and if they are present transform them to positive ones -dim -> rank - |dim|
    for (auto& dim : dimensions)
        if (dim < 0)
            dim += rank;
    // sort input array of dimensions, this operation is also necessary for creating TAD object in external methods
    if (dimSize > 1) {
        std::sort(dimensions.begin(), dimensions.end());
        // remove duplicates if they are present
        dimensions.erase(std::unique(dimensions.begin(), dimensions.end()), dimensions.end());
    }
    // check whether number of dimensions is to big (>rank)
    dimSize = dimensions.size();
    if (dimSize > rank)
        throw std::runtime_error("shape::checkDimensions method: number of input dimensions is too big ( > rank of array)!");
    // check if min dimension is still negative and whether max dimension is bigger then rank-1
    if (dimensions[0] < 0 || dimensions.back() > (rank-1))
        throw std::runtime_error("shape::checkDimensions method: the negative dimension is still present in input array after transform or the too big dimension is present ( > rank of array) !");
}

// return absolute index of array min, min is sub-array of max, index to be
// returned is min's index and corresponds to maxIdx of max array
INLINEDEF _CUDA_H Nd4jLong subArrayIndex(const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int maxIdx) {
    const int rankMax = maxShapeInfo[0];
    const int rankMin = minShapeInfo[0];

    // decompose maxIdx into per-axis coordinates of the max array
    auto* idxPerRank = new Nd4jLong[rankMax];
    ind2subC(rankMax, const_cast<Nd4jLong *>(maxShapeInfo) + 1, const_cast<int&>(maxIdx), idxPerRank);

    Nd4jLong minIdx = 0;
    // walk axes from the rear, wrapping coordinates that exceed the
    // (smaller) min-array extents
    for (int i = 0; i < rankMin; ++i) {
        if (minShapeInfo[rankMin - i] == 1 || idxPerRank[rankMax - i - 1] == 0)
            continue;
        if (idxPerRank[rankMax - i - 1] >= minShapeInfo[rankMin - i])
            idxPerRank[rankMax - i - 1] %= minShapeInfo[rankMin - i];
        minIdx += idxPerRank[rankMax - i - 1] * stride(const_cast<Nd4jLong*>(minShapeInfo))[rankMin - i - 1];
    }

    delete[] idxPerRank;
    return minIdx;
}

// rank-0 scalar shape info: rank 0, offset 0, ews 1, c-order
INLINEDEF _CUDA_HD void shapeScalar(Nd4jLong* const buffer) {
    buffer[0] = 0;
    buffer[1] = 0;
    buffer[2] = 1;
    buffer[3] = 99;
}

// legacy rank-2 (1x1) scalar shape info with explicit order flag
INLINEDEF _CUDA_HD void shapeOldScalar(Nd4jLong* const buffer, const char order) {
    buffer[0] = 2;
    buffer[1] = 1;
    buffer[2] = 1;
    buffer[3] = 1;
    buffer[4] = 1;
    buffer[5] = 0;
    buffer[6] = 1;
    buffer[7] = (int)order;
}

// rank-1 vector shape info of the given length, unit stride, c-order
INLINEDEF _CUDA_HD void shapeVector(const Nd4jLong length, Nd4jLong* const buffer) {
    buffer[0] = 1;
    buffer[1] = length;
    buffer[2] = 1;
    buffer[3] = 0;
    buffer[4] = 1;
    buffer[5] = 99;
}

// element-wise cast-copy of `length` values from T1 to T2
template <typename T1, typename T2>
INLINEDEF _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length) {
    for (Nd4jLong e = 0; e < length; e++)
        to[e] = (T2) from[e];
};

}

#endif /* SHAPE_H_ */
calib.c
/* Copyright 2013-2016. The Regents of the University of California. * Copyright 2016. Martin Uecker. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2012-2016 Martin Uecker <martin.uecker@med.uni-goettingen.de> * 2013 Dara Bahri <dbahri123@gmail.com> * 2015-2016 Siddharth Iyer <sid8795@gmail.com> * * * Uecker M, Lai P, Murphy MJ, Virtue P, Elad M, Pauly JM, Vasanawala SS, Lustig M. * ESPIRiT - An Eigenvalue Approach to Autocalibrating Parallel MRI: Where SENSE * meets GRAPPA. Magn Reson Med, 71:990-1001 (2014) * * Iyer S, Ong F, Lustig M. * Towards A Parameter Free ESPIRiT: Soft-Weighting For Robust Coil Sensitivity Estimation. * Presented in the session: "New Frontiers In Image Reconstruction" at ISMRM 2016. * http://www.ismrm.org/16/program_files/O86.htm * */ #include <assert.h> #include <complex.h> #include <math.h> #include <stdbool.h> #include "num/multind.h" #include "num/fft.h" #include "num/flpmath.h" #include "num/linalg.h" #include "num/lapack.h" #include "num/casorati.h" #include "num/rand.h" #include "misc/misc.h" #include "misc/mri.h" #include "misc/resize.h" #include "misc/debug.h" #include "misc/utils.h" #include "calib/calmat.h" #include "calib/cc.h" #include "calib/softweight.h" #include "calib.h" #ifdef USE_CUDA #include "calib/calibcu.h" #endif #if 0 #define CALMAT_SVD #endif #if 0 #define FLIP #endif #ifndef M_PI #define M_PI 3.14159265358979323846 #endif static void eigen_herm3(int M, int N, float val[M], complex float matrix[N][N]) // ordering might be different to herm2 { complex float mout[M][N]; for (int li = 0; li < N; li++) for (int lj = 0; lj < li; lj++) matrix[lj][li] = conj(matrix[li][lj]); //mat_identity(M, N, mout); orthiter(M, N, 30, val, mout, matrix); for (int i = 0; i < M; i++) for (int j = 0; j < N; j++) matrix[i][j] = mout[i][j]; } static float scurve(float x) { if (x <= -1.) return 0.; if (x >= 1.) return 1.; return 0.5 * (1. + 2. 
* x / (1. + powf(x, 2.))); } static float crop_weight_function(float crth, float val) { return scurve((sqrtf(val) - crth) / (1. - crth)); } static float crop_thresh_function(float crth, float val) { return (val <= crth) ? 0. : 1.; } typedef float (*weight_function)(float crth, float val); static void crop_weight(const long dims[DIMS], complex float* ptr, weight_function fun, float crth, const complex float* map) { long xx = dims[0]; long yy = dims[1]; long zz = dims[2]; long cc = dims[3]; long mm = dims[4]; assert(DIMS > 5); assert(1 == md_calc_size(DIMS - 5, dims + 5)); for (long m = 0; m < mm; m++) { #pragma omp parallel for for (long k = 0; k < zz; k++) { for (long i = 0; i < yy; i++) { for (long j = 0; j < xx; j++) { float val = cabsf(map[((m * zz + k) * yy + i) * xx + j]); for (long c = 0; c < cc; c++) ptr[(((m * cc + c) * zz + k) * yy + i) * xx + j] *= fun(crth, val); } } } } } void crop_sens(const long dims[DIMS], complex float* ptr, bool soft, float crth, const complex float* map) { crop_weight(dims, ptr, soft ? crop_weight_function : crop_thresh_function, crth, map); } /** * sure_crop - This determines the crop-threshold to use as described in the talk: "Towards A Parameter * Free ESPIRiT: Soft-Weighting For Robust Coil Sensitivity Estimation". This was given at the * session: "New Frontiers In Image Reconstruction" at ISMRM 2016. * * Parameters: * var - Estimated variance in data. * evec_dims - The eigenvector dimensions. * evec_data - The eigenvectors. * eptr - The eigenvalues. * calreg_dims - Dimension of the calibration region. * calreg - Calibration data. 
*/ static float sure_crop(float var, const long evec_dims[5], complex float* evec_data, complex float* eptr, const long calreg_dims[5], const complex float* calreg) { long num_maps = evec_dims[4]; // Construct low-resolution image long im_dims[5]; md_select_dims(5, 15, im_dims, evec_dims); complex float* im = md_alloc_sameplace(5, im_dims, CFL_SIZE, calreg); md_clear(5, im_dims, im, CFL_SIZE); md_resize_center(5, im_dims, im, calreg_dims, calreg, CFL_SIZE); ifftuc(5, im_dims, FFT_FLAGS, im, im); // Temporary vector for crop dimensions long cropdims[5]; md_select_dims(5, 15, cropdims, calreg_dims); cropdims[4] = num_maps; // Eigenvectors (M) complex float* M = md_alloc_sameplace(5, evec_dims, CFL_SIZE, calreg); md_copy(5, evec_dims, M, evec_data, CFL_SIZE); // Temporary eigenvector holder to hold low resolution maps complex float* LM = md_alloc_sameplace(5, evec_dims, CFL_SIZE, calreg); // Temporary holder for projection calreg complex float* TC = md_alloc_sameplace(5, calreg_dims, CFL_SIZE, calreg); // Temporary holder to hold low resolution calib maps complex float* CM = md_alloc_sameplace(5, cropdims, CFL_SIZE, calreg); // Eigenvalues (W) long W_dims[5]; md_select_dims(5, 23, W_dims, evec_dims); complex float* W = md_alloc_sameplace(5, W_dims, CFL_SIZE, calreg); md_copy(5, W_dims, W, eptr, CFL_SIZE); // Place holder for the inner product result complex float* ip = md_alloc_sameplace(5, W_dims, CFL_SIZE, calreg); // Place holder for the projection result complex float* proj = md_alloc_sameplace(5, im_dims, CFL_SIZE, calreg); // Place holder for divergence term long div_dims[5] = MD_INIT_ARRAY(5, 1); complex float* div = md_alloc_sameplace(5, div_dims, CFL_SIZE, calreg); // Calculating strides. 
long str1_ip[5]; long str2_ip[5]; long stro_ip[5]; md_calc_strides(5, str1_ip, im_dims, CFL_SIZE); md_calc_strides(5, str2_ip, evec_dims, CFL_SIZE); md_calc_strides(5, stro_ip, W_dims, CFL_SIZE); long str1_proj[5]; long str2_proj[5]; long stro_proj[5]; md_calc_strides(5, str1_proj, W_dims, CFL_SIZE); md_calc_strides(5, str2_proj, evec_dims, CFL_SIZE); md_calc_strides(5, stro_proj, im_dims, CFL_SIZE); long str1_div[5]; long str2_div[5]; long stro_div[5]; md_calc_strides(5, str1_div, evec_dims, CFL_SIZE); md_calc_strides(5, str2_div, evec_dims, CFL_SIZE); md_calc_strides(5, stro_div, div_dims, CFL_SIZE); long tdims_ip[5]; long tdims_proj[5]; for (int i = 0; i < 5; i++) { assert((im_dims[i] == evec_dims[i]) || (1 == im_dims[i]) || (1 == evec_dims[i])); assert((W_dims[i] == evec_dims[i]) || (1 == W_dims[i]) || (1 == evec_dims[i])); tdims_ip[i] = (1 == im_dims[i]) ? evec_dims[i] : im_dims[i]; tdims_proj[i] = (1 == W_dims[i]) ? evec_dims[i] : W_dims[i]; } // Starting parameter sweep with SURE. float mse = -1.; float old_mse = 0.; float s = -0.1; float c = 0.99; long ctr1 = 0; long ctr2 = 0; debug_printf(DP_INFO, "---------------------------------------------\n"); debug_printf(DP_INFO, "| CTR1 | CTR2 | Crop | Est. MSE |\n"); debug_printf(DP_INFO, "---------------------------------------------\n"); while (fabs(s) > 1.E-4) { ctr1++; while ( (c < 0.999) && (c > 0.001) && ( (ctr2 <= 1) || (mse < old_mse))) { ctr2++; md_clear(5, W_dims, ip, CFL_SIZE); md_clear(5, im_dims, proj, CFL_SIZE); md_clear(5, div_dims, div, CFL_SIZE); md_clear(5, evec_dims, M, CFL_SIZE); md_clear(5, evec_dims, LM, CFL_SIZE); md_clear(5, calreg_dims, TC, CFL_SIZE); md_copy(5, evec_dims, M, evec_data, CFL_SIZE); old_mse = mse; mse = 0.; crop_weight(evec_dims, M, crop_thresh_function, c, W); md_zfmacc2(5, tdims_ip, stro_ip, ip, str1_ip, im, str2_ip, M); // Projection. md_zfmac2(5, tdims_proj, stro_proj, proj, str1_proj, ip, str2_proj, M); fftuc(5, im_dims, FFT_FLAGS, proj, proj); // Low res proj img. 
md_resize_center(5, calreg_dims, TC, im_dims, proj, CFL_SIZE); md_resize_center(5, im_dims, proj, calreg_dims, TC, CFL_SIZE); ifftuc(5, im_dims, FFT_FLAGS, proj, proj); for (long jdx = 0; jdx < md_calc_size(5, im_dims); jdx++) mse += powf(cabsf(im[jdx] - proj[jdx]), 2.); fftuc(5, evec_dims, FFT_FLAGS, LM, M); // low-res maps . md_resize_center(5, cropdims, CM, evec_dims, LM, CFL_SIZE); md_resize_center(5, evec_dims, LM, cropdims, CM, CFL_SIZE); ifftuc(5, evec_dims, FFT_FLAGS, LM, LM); md_zfmacc2(5, evec_dims, stro_div, div, str1_div, LM, str2_div, LM); // Calc SURE div using low res maps. mse += 2. * var * crealf(*div); if (ctr2 == 1) debug_printf(DP_INFO, "| %4ld | %4ld | %0.4f | %0.12e |\n", ctr1, ctr2, c, mse); else debug_printf(DP_INFO, "| | %4ld | %0.4f | %0.12e |\n", ctr2, c, mse); c = c + s; } c -= s; ctr2 = 0; s = -s / 2; c += s; } c = c + s; debug_printf(DP_INFO, "---------------------------------------------\n"); md_free(im); md_free(TC); md_free(CM); md_free(M); md_free(LM); md_free(W); md_free(ip); md_free(proj); md_free(div); debug_printf(DP_DEBUG1, "Calculated c: %.4f\n", c); return c; } void calone(const struct ecalib_conf* conf, const long cov_dims[4], complex float* imgcov, int SN, float svals[SN], const long calreg_dims[DIMS], const complex float* data) { assert(1 == md_calc_size(DIMS - 5, calreg_dims + 5)); #if 1 long nskerns_dims[5]; complex float* nskerns; compute_kernels(conf, nskerns_dims, &nskerns, SN, svals, calreg_dims, data); #else long channels = calreg_dims[3]; long kx = conf->kdims[0]; long ky = conf->kdims[1]; long kz = conf->kdims[2]; long nskerns_dims[5] = { kx, ky, kz, channels, 0 }; long N = md_calc_size(4, nskerns_dims); assert(N > 0); nskerns_dims[4] = N; complex float* nskerns = md_alloc(5, nskerns_dims, CFL_SIZE); long nr_kernels = channels; nskerns_dims[4] = channels; spirit_kernel(nskerns_dims, nskerns, calreg_dims, data); #endif compute_imgcov(cov_dims, imgcov, nskerns_dims, nskerns); md_free(nskerns); } /* calculate 
point-wise maps * */ void eigenmaps(const long out_dims[DIMS], complex float* optr, complex float* eptr, const complex float* imgcov2, const long msk_dims[3], const bool* msk, bool orthiter, bool ecal_usegpu) { #ifdef USE_CUDA if (ecal_usegpu) { //FIXME cuda version should be able to return sensitivities for a subset of image-space points assert(!msk); eigenmapscu(out_dims, optr, eptr, imgcov2); return; } #else assert(!ecal_usegpu); #endif long channels = out_dims[3]; long maps = out_dims[4]; assert(DIMS >= 5); assert(1 == md_calc_size(DIMS - 5, out_dims + 5)); assert(maps <= channels); long xx = out_dims[0]; long yy = out_dims[1]; long zz = out_dims[2]; float scale = 1.; // for some reason, not if (msk_dims) { assert(msk_dims[0] == xx); assert(msk_dims[1] == yy); assert(msk_dims[2] == zz); } md_clear(5, out_dims, optr, CFL_SIZE); #pragma omp parallel for collapse(3) for (long k = 0; k < zz; k++) { for (long j = 0; j < yy; j++) { for (long i = 0; i < xx; i++) { if (!msk || msk[i + xx * (j + yy * k)]) { float val[channels]; complex float cov[channels][channels]; complex float tmp[channels * (channels + 1) / 2]; for (long l = 0; l < channels * (channels + 1) / 2; l++) tmp[l] = imgcov2[((l * zz + k) * yy + j) * xx + i] / scale; unpack_tri_matrix(channels, cov, tmp); if (orthiter) eigen_herm3(maps, channels, val, cov); else lapack_eig(channels, val, cov); for (long u = 0; u < maps; u++) { long ru = (orthiter ? 
maps : channels) - 1 - u; for (long v = 0; v < channels; v++) optr[((((u * channels + v) * zz + k) * yy + j) * xx + i)] = cov[ru][v]; if (NULL != eptr) eptr[((u * zz + k) * yy + j) * xx + i] = val[ru]; } } } } } } void caltwo(const struct ecalib_conf* conf, const long out_dims[DIMS], complex float* out_data, complex float* emaps, const long in_dims[4], complex float* in_data, const long msk_dims[3], const bool* msk) { long xx = out_dims[0]; long yy = out_dims[1]; long zz = out_dims[2]; long xh = in_dims[0]; long yh = in_dims[1]; long zh = in_dims[2]; long channels = out_dims[3]; long cosize = channels * (channels + 1) / 2; assert(DIMS >= 5); assert(1 == md_calc_size(DIMS - 5, out_dims + 5)); assert(in_dims[3] == cosize); long cov_dims[4] = { xh, yh, zh, cosize }; long covbig_dims[4] = { xx, yy, zz, cosize }; assert(((xx == 1) && (xh == 1)) || (xx >= xh)); assert(((yy == 1) && (yh == 1)) || (yy >= yh)); assert(((zz == 1) && (zh == 1)) || (zz >= zh)); assert((1 == xh) || (0 == xh % 2)); assert((1 == yh) || (0 == yh % 2)); assert((1 == zh) || (0 == zh % 2)); complex float* imgcov2 = md_alloc(4, covbig_dims, CFL_SIZE); debug_printf(DP_DEBUG1, "Resize...\n"); sinc_zeropad(4, covbig_dims, imgcov2, cov_dims, in_data); debug_printf(DP_DEBUG1, "Point-wise eigen-decomposition...\n"); eigenmaps(out_dims, out_data, emaps, imgcov2, msk_dims, msk, conf->orthiter, conf->usegpu); md_free(imgcov2); } void calone_dims(const struct ecalib_conf* conf, long cov_dims[4], long channels) { long kx = conf->kdims[0]; long ky = conf->kdims[1]; long kz = conf->kdims[2]; cov_dims[0] = (1 == kx) ? 1 : (2 * kx); cov_dims[1] = (1 == ky) ? 1 : (2 * ky); cov_dims[2] = (1 == kz) ? 
1 : (2 * kz); cov_dims[3] = channels * (channels + 1) / 2; } const struct ecalib_conf ecalib_defaults = { { 6, 6, 6 }, 0.001, -1, -1., false, false, 0.8, true, false, -1., false, true, -1., false}; void calib2(const struct ecalib_conf* conf, const long out_dims[DIMS], complex float* out_data, complex float* eptr, int SN, float svals[SN], const long calreg_dims[DIMS], const complex float* data, const long msk_dims[3], const bool* msk) { long channels = calreg_dims[3]; long maps = out_dims[4]; assert(calreg_dims[3] == out_dims[3]); assert(maps <= channels); assert(1 == md_calc_size(DIMS - 5, out_dims + 5)); assert(1 == md_calc_size(DIMS - 5, calreg_dims + 5)); complex float rot[channels][channels]; if (conf->rotphase) { // rotate the the phase with respect to the first principle component long scc_dims[DIMS] = MD_INIT_ARRAY(DIMS, 1); scc_dims[COIL_DIM] = channels; scc_dims[MAPS_DIM] = channels; scc(scc_dims, &rot[0][0], calreg_dims, data); } else { for (int i = 0; i < channels; i++) for (int j = 0; j < channels; j++) rot[i][j] = (i == j) ? 1. : 0.; } long cov_dims[4]; calone_dims(conf, cov_dims, channels); complex float* imgcov = md_alloc(4, cov_dims, CFL_SIZE); calone(conf, cov_dims, imgcov, SN, svals, calreg_dims, data); caltwo(conf, out_dims, out_data, eptr, cov_dims, imgcov, msk_dims, msk); /* Intensity and phase normalization similar as proposed * for adaptive combine (Walsh's method) in * Griswold et al., ISMRM 10:2410 (2002) */ if (conf->intensity) { debug_printf(DP_DEBUG1, "Normalize...\n"); /* I think the reason this works is because inhomogeneity usually * comes from only a few coil elements which are close. The l1-norm * is more resilient against such outliers. -- Martin */ normalizel1(DIMS, COIL_FLAG, out_dims, out_data); md_zsmul(DIMS, out_dims, out_data, out_data, sqrtf((float)channels)); } float c = (conf->crop >= 0.) ? conf->crop : sure_crop(conf->var, out_dims, out_data, eptr, calreg_dims, data); debug_printf(DP_DEBUG1, "Crop maps... 
(c = %.2f)\n", c); crop_sens(out_dims, out_data, conf->softcrop, c, eptr); debug_printf(DP_DEBUG1, "Fix phase...\n"); fixphase2(DIMS, out_dims, COIL_DIM, rot[0], out_data, out_data); md_free(imgcov); } void calib(const struct ecalib_conf* conf, const long out_dims[DIMS], complex float* out_data, complex float* eptr, int SN, float svals[SN], const long calreg_dims[DIMS], const complex float* data) { calib2(conf, out_dims, out_data, eptr, SN, svals, calreg_dims, data, NULL, NULL); } static void perturb(const long dims[2], complex float* vecs, float amt) { complex float* noise = md_alloc(2, dims, CFL_SIZE); md_gaussian_rand(2, dims, noise); for (long j = 0; j < dims[1]; j++) { float nrm = md_znorm(1, dims, noise + j * dims[0]); complex float val = amt / nrm; md_zsmul(1, dims, noise + j * dims[0], noise + j * dims[0], val); } md_zadd(2, dims, vecs, vecs, noise); for (long j = 0; j < dims[1]; j++) { float nrm = md_znorm(1, dims, vecs + j * dims[0]); complex float val = 1 / nrm; md_zsmul(1, dims, vecs + j * dims[0], vecs + j * dims[0], val); } md_free(noise); } static int number_of_kernels(const struct ecalib_conf* conf, int N, const float val[N]) { int n = 0; if (-1 != conf->numsv) { n = conf->numsv; assert(-1. == conf->percentsv); assert(-1. == conf->threshold); } else if (conf->percentsv != -1.) { n = N * conf->percentsv / 100.; assert(-1 == conf->numsv); assert(-1. == conf->threshold); } else { assert(-1 == conf->numsv); assert(-1. == conf->percentsv); for (int i = 0; i < N; i++) if (val[i] / val[0] > sqrtf(conf->threshold)) n++; } if (val[0] <= 0.) error("No signal.\n"); debug_printf(DP_DEBUG1, "Using %d/%ld kernels (%.2f%%, last SV: %f%s).\n", n, N, (float)n / (float)N * 100., (n > 0) ? (val[n - 1] / val[0]) : 1., conf->weighting ? 
", weighted" : ""); float tr = 0.; for (int i = 0; i < N; i++) { tr += powf(val[i], 2.); debug_printf(DP_DEBUG3, "SVALS %f (%f)\n", val[i], val[i] / val[0]); } debug_printf(DP_DEBUG3, "\nTRACE: %f (%f)\n", tr, tr / (float)N); assert(n <= N); return n; } void compute_kernels(const struct ecalib_conf* conf, long nskerns_dims[5], complex float** nskerns_ptr, int SN, float val[SN], const long caldims[DIMS], const complex float* caldata) { assert(1 == md_calc_size(DIMS - 5, caldims + 5)); nskerns_dims[0] = conf->kdims[0]; nskerns_dims[1] = conf->kdims[1]; nskerns_dims[2] = conf->kdims[2]; nskerns_dims[3] = caldims[3]; long N = md_calc_size(4, nskerns_dims); assert(N > 0); nskerns_dims[4] = N; complex float* nskerns = md_alloc(5, nskerns_dims, CFL_SIZE); *nskerns_ptr = nskerns; PTR_ALLOC(complex float[N][N], vec); assert(NULL != val); assert(SN == N); debug_printf(DP_DEBUG1, "Build calibration matrix and SVD...\n"); #ifdef CALMAT_SVD calmat_svd(conf->kdims, N, *vec, val, caldims, caldata); if (conf->weighting) soft_weight_singular_vectors(N, conf->var, conf->kdims, caldims, val, val); for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) #ifndef FLIP nskerns[i * N + j] = ((*vec)[j][i]) * (conf->weighting ? val[i] : 1.); #else nskerns[i * N + j] = ((*vec)[j][N - 1 - i]) * (conf->weighting ? val[N - 1 - i] : 1.); #endif #else covariance_function(conf->kdims, N, *vec, caldims, caldata); debug_printf(DP_DEBUG1, "Eigen decomposition... (size: %ld)\n", N); // we could apply Nystroem method here to speed it up float tmp_val[N]; lapack_eig(N, tmp_val, *vec); // reverse and square root, test for smaller null to avoid NaNs for (int i = 0; i < N; i++) val[i] = (tmp_val[N - 1 - i] < 0.) ? 0. : sqrtf(tmp_val[N - 1 - i]); if (conf->weighting) soft_weight_singular_vectors(N, conf-> var, conf->kdims, caldims, val, val); for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) #ifndef FLIP nskerns[i * N + j] = (*vec)[N - 1 - i][j] * (conf->weighting ? 
val[i] : 1.); // flip #else nskerns[i * N + j] = (*vec)[i][j] * (conf->weighting ? val[N - 1 - i] : 1.); // flip #endif #endif if (conf->perturb > 0.) { long dims[2] = { N, N }; perturb(dims, nskerns, conf->perturb); } #ifndef FLIP nskerns_dims[4] = number_of_kernels(conf, N, val); #else nskerns_dims[4] = N - number_of_kernels(conf, N, val); #endif PTR_FREE(vec); } void compute_imgcov(const long cov_dims[4], complex float* imgcov, const long nskerns_dims[5], const complex float* nskerns) { debug_printf(DP_DEBUG1, "Zeropad...\n"); long xh = cov_dims[0]; long yh = cov_dims[1]; long zh = cov_dims[2]; long kx = nskerns_dims[0]; long ky = nskerns_dims[1]; long kz = nskerns_dims[2]; long channels = nskerns_dims[3]; long nr_kernels = nskerns_dims[4]; long imgkern_dims[5] = { xh, yh, zh, channels, nr_kernels }; complex float* imgkern1 = md_alloc(5, imgkern_dims, CFL_SIZE); complex float* imgkern2 = md_alloc(5, imgkern_dims, CFL_SIZE); md_resize_center(5, imgkern_dims, imgkern1, nskerns_dims, nskerns, CFL_SIZE); // resort array debug_printf(DP_DEBUG1, "FFT (juggling)...\n"); long istr[5]; long mstr[5]; long idim[5] = { xh, yh, zh, channels, nr_kernels }; long mdim[5] = { nr_kernels, channels, xh, yh, zh }; md_calc_strides(5, istr, idim, CFL_SIZE); md_calc_strides(5, mstr, mdim, CFL_SIZE); long m2str[5] = { mstr[2], mstr[3], mstr[4], mstr[1], mstr[0] }; ifftmod(5, imgkern_dims, FFT_FLAGS, imgkern1, imgkern1); ifft2(5, imgkern_dims, FFT_FLAGS, m2str, imgkern2, istr, imgkern1); float scalesq = (kx * ky * kz) * (xh * yh * zh); // second part for FFT scaling md_free(imgkern1); debug_printf(DP_DEBUG1, "Calculate Gram matrix...\n"); int cosize = channels * (channels + 1) / 2; assert(cov_dims[3] == cosize); #pragma omp parallel for collapse(3) for (int k = 0; k < zh; k++) { for (int j = 0; j < yh; j++) { for (int i = 0; i < xh; i++) { complex float gram[cosize]; gram_matrix2(channels, gram, nr_kernels, (const complex float (*)[nr_kernels])(imgkern2 + ((k * yh + j) * xh + i) * 
(channels * nr_kernels))); #ifdef FLIP // add (scaled) identity matrix for (int i = 0, l = 0; i < channels; i++) for (int j = 0; j <= i; j++, l++) gram[l] = ((i == j) ? (kx * ky * kz) : 0.) - gram[l]; #endif for (int l = 0; l < cosize; l++) imgcov[(((l * zh) + k) * yh + j) * xh + i] = gram[l] / scalesq; } } } md_free(imgkern2); }
move_particle_utility.h
// KRATOS ___ ___ _ ___ __ ___ ___ ___ ___ // / __/ _ \| \| \ \ / /__| \_ _| __| __| // | (_| (_) | .` |\ V /___| |) | || _|| _| // \___\___/|_|\_| \_/ |___/___|_| |_| APPLICATION // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Pablo Becker // #if !defined(KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_TRANSPORT_INCLUDED) #define KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_TRANSPORT_INCLUDED // System includes #include <string> #include <iostream> #include <algorithm> // External includes // Project includes #include "includes/define.h" #include "includes/node.h" /// #include "includes/dof.h" #include "includes/variables.h" #include "containers/array_1d.h" #include "containers/data_value_container.h" #include "includes/mesh.h" #include "utilities/math_utils.h" #include "processes/node_erase_process.h" /// #include "utilities/geometry_utilities.h" #include "includes/model_part.h" #include "spatial_containers/spatial_containers.h" #include "spatial_containers/bounding_box.h" #include "spatial_containers/cell.h" #include "spatial_containers/bins_dynamic_objects.h" #include "utilities/spatial_containers_configure.h" #include "geometries/line_2d_2.h" #include "geometries/triangle_2d_3.h" #include "geometries/triangle_3d_3.h" #include "geometries/point.h" #include "convection_diffusion_application.h" #include "convection_particle.h" #include "utilities/openmp_utils.h" #include "time.h" //#include "processes/process.h" namespace Kratos { //this class is to be modified by the user to customize the interpolation process template< unsigned int TDim> class MoveParticleUtilityScalarTransport { public: typedef SpatialContainersConfigure<TDim> Configure; typedef typename Configure::PointType PointType; //typedef PointType::CoordinatesArrayType CoordinatesArrayType; typedef typename Configure::ContainerType ContainerType; //typedef Configure::PointerType PointerType; typedef typename Configure::IteratorType IteratorType; typedef typename 
Configure::ResultContainerType ResultContainerType;
    //typedef Configure::ResultPointerType ResultPointerType;
    typedef typename Configure::ResultIteratorType ResultIteratorType;
    typedef PointerVector< Convection_Particle, Convection_Particle*, std::vector<Convection_Particle*> > ParticlePointerVector;
    //typedef Configure::ContactPairType ContactPairType;
    //typedef Configure::ContainerContactType ContainerContactType;
    //typedef Configure::IteratorContactType IteratorContactType;
    //typedef Configure::PointerContactType PointerContactType;
    //typedef Configure::PointerTypeIterator PointerTypeIterator;

    KRATOS_CLASS_POINTER_DEFINITION(MoveParticleUtilityScalarTransport);

    //template<unsigned int TDim>
    /// Constructor.
    /// Renumbers the elements consecutively (1..n) so element Id can be used as an
    /// array index later, computes per-node (MEAN_SIZE on nodes) and per-element
    /// (MEAN_SIZE on elements) characteristic sizes, allocates the global particle
    /// pool and seeds the initial particles in every element, interpolating the
    /// unknown variable from the mesh nodes.
    /// NOTE(review): assumes CONVECTION_DIFFUSION_SETTINGS is present in the
    /// ProcessInfo of model_part — confirm the caller configures it beforehand.
    MoveParticleUtilityScalarTransport(ModelPart& model_part, int maximum_number_of_particles)
        : mr_model_part(model_part)
        , mmaximum_number_of_particles(maximum_number_of_particles)
        , mUnknownVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetUnknownVariable())
        , mProjectionVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetProjectionVariable())
        , mVelocityVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetVelocityVariable())
        , mMeshVelocityVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetMeshVelocityVariable())
    {
        std::cout << "initializing moveparticle utility for scalar transport" << std::endl;

        Check();
        //storing water and air density and their inverses, just in case it is needed for the streamline integration
        //loop in elements to change their ID to their position in the array. Easier to get information later.
        //DO NOT PARALELIZE THIS! IT MUST BE SERIAL!!!!!!!!!!!!!!!!!!!!!!
        ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
        for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++)
        {
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            ielem->SetId(ii+1);
        }
        mlast_elem_id= (mr_model_part.ElementsEnd()-1)->Id();
        int node_id=0;
        // we look for the smallest edge. could be used as a weighting function when going lagrangian->eulerian instead of traditional shape functions(method currently used)
        ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
        vector<unsigned int> node_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator pnode = inodebegin+ii;
                array_1d<double,3> position_node;
                double distance=0.0;
                position_node = pnode->Coordinates();
                WeakPointerVector< Node<3> >& rneigh = pnode->GetValue(NEIGHBOUR_NODES);
                //we loop all the nodes to check all the edges
                const double number_of_neighbours = double(rneigh.size());
                // MEAN_SIZE is actually the average edge length to the neighbours (see accumulation below)
                for( WeakPointerVector<Node<3> >::iterator inode = rneigh.begin(); inode!=rneigh.end(); inode++)
                {
                    array_1d<double,3> position_difference;
                    position_difference = inode->Coordinates() - position_node;
                    double current_distance= sqrt(pow(position_difference[0],2)+pow(position_difference[1],2)+pow(position_difference[2],2));
                    //if (current_distance>distance)
                    //    distance=current_distance;
                    distance += current_distance / number_of_neighbours;
                }
                //and we save the largest edge.
                pnode->FastGetSolutionStepValue(MEAN_SIZE)=distance;

                node_id=pnode->GetId();
            }
        }
        mlast_node_id=node_id;

        //we also calculate the element mean size in the same way, for the courant number
        //also we set the right size to the LHS column for the pressure enrichments, in order to recover correctly the enrichment pressure
        vector<unsigned int> element_partition;
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

        //before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;

                // element MEAN_SIZE = length of its shortest edge
                double mElemSize;
                array_1d<double,3> Edge(3,0.0);
                Edge = ielem->GetGeometry()[1].Coordinates() - ielem->GetGeometry()[0].Coordinates();
                mElemSize = Edge[0]*Edge[0];
                for (unsigned int d = 1; d < TDim; d++)
                    mElemSize += Edge[d]*Edge[d];

                for (unsigned int i = 2; i < (TDim+1); i++)
                    for(unsigned int j = 0; j < i; j++)
                    {
                        Edge = ielem->GetGeometry()[i].Coordinates() - ielem->GetGeometry()[j].Coordinates();
                        double Length = Edge[0]*Edge[0];
                        for (unsigned int d = 1; d < TDim; d++)
                            Length += Edge[d]*Edge[d];
                        if (Length < mElemSize) mElemSize = Length;
                    }
                mElemSize = sqrt(mElemSize);
                ielem->GetValue(MEAN_SIZE) = mElemSize;
            }
        }

        //matrix containing the position of the 4/15/45 particles that we will seed at the beggining
        BoundedMatrix<double, 5*(1+TDim), 3 > pos;
        BoundedMatrix<double, 5*(1+TDim), (1+TDim) > N;

        int particle_id=0;
        mnelems = mr_model_part.Elements().size();

        std::cout << "about to resize vectors" << std::endl;

        //setting the right size to the vector containing the particles assigned to each element
        //particles vector. this vector contains ALL the particles in the simulation.
        mparticles_vector.resize(mnelems*mmaximum_number_of_particles);

        //and this vector contains the current number of particles that are in each element (currently zero)
        mnumber_of_particles_in_elems.resize(mnelems);
        mnumber_of_particles_in_elems=ZeroVector(mnelems);

        //when moving the particles, an auxiliary vector is necessary (to store the previous number)
        mnumber_of_particles_in_elems_aux.resize(mnelems);

        //each element will have a list of pointers to all the particles that are inside.
        //this vector contains the pointers to the vector of (particle) pointers of each element.
        mvector_of_particle_pointers_vectors.resize(mnelems);
        //int artz;
        //std::cin >> artz;
        int i_int=0; //careful! it's not the id, but the position inside the array!

        std::cout << "about to create particles" << std::endl;
        //now we seed: LOOP IN ELEMENTS
        //using loop index, DO NOT paralelize this! change lines : mparticles_in_elems_pointers((ii*mmaximum_number_of_particles)+mparticles_in_elems_integers(ii)) = pparticle; and the next one

        moffset=0;
        //Convection_Particle& firstparticle =mparticles_vector[0];
        for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++)
        {
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            //(ielem->GetValue(BED_PARTICLE_POINTERS)) = ParticlePointerVector( mmaximum_number_of_particles*2, &firstparticle );
            //ParticlePointerVector& particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
            //now we link the mpointers_to_particle_pointers_vectors to the corresponding element
            //mpointers_to_particle_pointers_vectors(ii) = &particle_pointers;
            //now we resize the vector of particle pointers. it is double sized because we move the particles from an initial position (first half) to a final position (second half).
            //for(int j=0; j<(mmaximum_number_of_particles*2); j++)
            //    particle_pointers.push_back(&firstparticle);
            mvector_of_particle_pointers_vectors[ii] = ParticlePointerVector( mmaximum_number_of_particles*2 );
            ParticlePointerVector& particle_pointers = mvector_of_particle_pointers_vectors[ii];
            //int & number_of_particles = ielem->GetValue(NUMBER_OF_BED_PARTICLES);
            int & number_of_particles = mnumber_of_particles_in_elems[ii];
            number_of_particles=0;

            Geometry< Node<3> >& geom = ielem->GetGeometry();
            //unsigned int elem_id = ielem->Id();
            //mareas_vector[i_int]=CalculateArea(geom); UNUSED SO COMMENTED
            ComputeGaussPointPositions_initial(geom, pos, N); //we also have the standard (4), and 45
            //now we seed the particles in the current element
            for (unsigned int j = 0; j < pos.size1(); j++)
            {
                ++particle_id;

                Convection_Particle& pparticle = mparticles_vector[particle_id-1];
                pparticle.X()=pos(j,0);
                pparticle.Y()=pos(j,1);
                pparticle.Z()=pos(j,2);

                pparticle.GetEraseFlag()=false;

                // interpolate the transported scalar from the element nodes via shape functions
                float & scalar1= pparticle.GetScalar1();
                scalar1=0.0;

                for (unsigned int k = 0; k < (TDim+1); k++)
                {
                    scalar1 += N(j, k) * geom[k].FastGetSolutionStepValue(mUnknownVar);
                }

                particle_pointers(j) = &pparticle;
                number_of_particles++ ;
            }
            ++i_int;
        }

        m_nparticles=particle_id; //we save the last particle created as the total number of particles we have. For the moment this is true.
        KRATOS_WATCH(m_nparticles);
        //KRATOS_WATCH(mlast_elem_id);
        mparticle_printing_tool_initialized=false;
        //std::cin >> artz;
    }

    virtual ~MoveParticleUtilityScalarTransport()
    {}

    /// Builds the dynamic-bins spatial search structure over the elements;
    /// must be called before particles are convected (MoveParticle queries it).
    void MountBin()
    {
        KRATOS_TRY

        //copy the elements to a new container, as the list will
        //be shuffled duringthe construction of the tree
        ContainerType& rElements = mr_model_part.ElementsArray();
        IteratorType it_begin = rElements.begin();
        IteratorType it_end = rElements.end();
        //const int number_of_elem = rElements.size();

        typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin, it_end ) );
        paux.swap(mpBinsObjectDynamic);
        //BinsObjectDynamic<Configure> mpBinsObjectDynamic(it_begin, it_end );

        std::cout << "finished mounting Bins" << std::endl;

        KRATOS_CATCH("")
    }

    /// For each element stores |mean nodal velocity| / MEAN_SIZE in
    /// MEAN_VEL_OVER_ELEM_SIZE (used for Courant-number-based substepping).
    void CalculateVelOverElemSize()
    {
        KRATOS_TRY

        //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();

        const double nodal_weight = 1.0/ (1.0 + double (TDim) );

        ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

        vector<unsigned int> element_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
                Geometry<Node<3> >& geom = ielem->GetGeometry();

                array_1d<double, 3 >vector_mean_velocity=ZeroVector(3);

                for (unsigned int i=0; i != (TDim+1) ; i++)
                    vector_mean_velocity += geom[i].FastGetSolutionStepValue(mVelocityVar);
                vector_mean_velocity *= nodal_weight;

                const double mean_velocity = sqrt ( pow(vector_mean_velocity[0],2) + pow(vector_mean_velocity[1],2) + pow(vector_mean_velocity[2],2) );
                ielem->GetValue(MEAN_VEL_OVER_ELEM_SIZE) = mean_velocity / (
ielem->GetValue(MEAN_SIZE) );
            }
        }
        KRATOS_CATCH("")
    }

    //name self explained
    /// Restores the previous-time-step value of the unknown on every node where
    /// the unknown is fixed (Dirichlet), undoing any modification done by the
    /// projection step on those nodes.
    void ResetBoundaryConditions()
    {
        KRATOS_TRY

        ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
        vector<unsigned int> node_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                if (inode->IsFixed(mUnknownVar))
                {
                    inode->FastGetSolutionStepValue(mUnknownVar)=inode->GetSolutionStepValue(mUnknownVar,1);
                }
            }
        }
        KRATOS_CATCH("")
    }

    /// Computes DELTA_SCALAR1 = unknown - projection on every node; this delta is
    /// later interpolated back onto the particles (see
    /// CorrectParticlesWithoutMovingUsingDeltaVariables).
    void CalculateDeltaVariables()
    {
        KRATOS_TRY
        ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
        vector<unsigned int> node_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                inode->FastGetSolutionStepValue(DELTA_SCALAR1) = inode->FastGetSolutionStepValue(mUnknownVar) - inode->FastGetSolutionStepValue(mProjectionVar) ;
            }
        }
        KRATOS_CATCH("")
    }

    /// Copies the current value of OriginVariable into buffer position 1
    /// (previous time step) for every node in rNodes.
    void CopyScalarVarToPreviousTimeStep(const Variable<double>& OriginVariable, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY
        ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin();
        vector<unsigned int> node_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                inode->GetSolutionStepValue(OriginVariable,1) = inode->FastGetSolutionStepValue(OriginVariable);
            }
        }
        KRATOS_CATCH("")
    }

    //to move all the particles across the streamlines. heavy task!
    /// Lagrangian convection step: every particle is advected along the velocity
    /// field and re-registered in the element where it lands. Uses the double-sized
    /// per-element pointer arrays: reads from the half selected by moffset and
    /// writes into the other half (post_offset), then flips moffset.
    void MoveParticles()
    {
        KRATOS_TRY

        ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();

        const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
        //moveparticlesdiff reads from the pointers of one part (ie odd) and saves into the other part (ie even part)
        //since it is the only function in the whole procedure that does this, it must use alternatively one part and the other.
        //KRATOS_WATCH(offset)
        bool even_timestep;
        if (offset!=0) even_timestep=false;
        else even_timestep=true;

        const int post_offset = mmaximum_number_of_particles*int(even_timestep); //and we also save the offset to know the location in which we will save the pointers after we've moved the particles
        //KRATOS_WATCH(post_offset)

        double delta_t = CurrentProcessInfo[DELTA_TIME];

        array_1d<double,TDim+1> N;
        const unsigned int max_results = 10000;

        //double integration_distance= 2.0;

        // substepping parameters for the streamline integration
        max_nsubsteps = 10;
        max_substep_dt=delta_t/double(max_nsubsteps);

        vector<unsigned int> element_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

        ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

        //before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                //ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;

                int & number_of_particles = mnumber_of_particles_in_elems[ii]; //old_element->GetValue(NUMBER_OF_BED_PARTICLES);

                // stash the pre-move particle counts; the live counters restart from zero
                mnumber_of_particles_in_elems_aux[ii]=number_of_particles;
                mnumber_of_particles_in_elems[ii]=0;
                //we reset the local vectors for a faster access;
            }
        }
        std::cout << "convecting particles" << std::endl;
        //We move the particles across the fixed mesh and saving change data into them (using the function MoveParticle)

        #pragma omp barrier

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            ResultContainerType results(max_results);

            WeakPointerVector< Element > elements_in_trajectory;
            elements_in_trajectory.resize(20);

            for(unsigned int ielem=element_partition[kkk]; ielem<element_partition[kkk+1]; ielem++)
            {
                //for(unsigned int ielem=0; ielem<mr_model_part.Elements().size(); ielem++)
                //{
                ModelPart::ElementsContainerType::iterator old_element = ielembegin+ielem;
                const int old_element_id = old_element->Id();

                ParticlePointerVector& old_element_particle_pointers = mvector_of_particle_pointers_vectors(old_element_id-1);

                if ( (results.size()) !=max_results)
                    results.resize(max_results);

                unsigned int number_of_elements_in_trajectory=0; //excluding the origin one (current one, ielem)

                for(int ii=0; ii<(mnumber_of_particles_in_elems_aux(ielem)); ii++)
                {
                    Convection_Particle & pparticle = old_element_particle_pointers[offset+ii];

                    Element::Pointer pcurrent_element( *old_element.base() );
                    ResultIteratorType result_begin = results.begin();
                    bool & erase_flag=pparticle.GetEraseFlag();
                    if (erase_flag==false){
                        MoveParticle(pparticle,pcurrent_element,elements_in_trajectory,number_of_elements_in_trajectory,result_begin,max_results); //removed N from the arguments: it is not needed since the particle ALWAYS starts at a node and we do not care where it ends

                        const int current_element_id = pcurrent_element->Id();

                        int & number_of_particles_in_current_elem = mnumber_of_particles_in_elems(current_element_id-1);
                        //int & number_of_water_particles_in_current_elem = mnumber_of_water_particles_in_elems(current_element_id-1);

                        // cheap unsynchronized pre-check; rechecked below under the critical section
                        if (number_of_particles_in_current_elem<mmaximum_number_of_particles && erase_flag==false)
                        {
                            {
                                ParticlePointerVector& current_element_particle_pointers = mvector_of_particle_pointers_vectors(current_element_id-1);

                                #pragma omp critical
                                {
                                    if (number_of_particles_in_current_elem<mmaximum_number_of_particles) // we cant go over this node, there's no room. otherwise we would be in the position of the first particle of the next element!!
                                    {
                                        current_element_particle_pointers(post_offset+number_of_particles_in_current_elem) = &pparticle;
                                        number_of_particles_in_current_elem++ ;
                                        if (number_of_particles_in_current_elem>mmaximum_number_of_particles)
                                            KRATOS_WATCH("MAL");
                                    }
                                    else
                                        pparticle.GetEraseFlag()=true; //so we just delete it!
                                }
                            }
                        }
                        else
                            pparticle.GetEraseFlag()=true; //so we just delete it!
                    }
                }
            }
        }

        /*
        //now we pass info from the local vector to the elements:
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;
                old_element->GetValue(NUMBER_OF_BED_PARTICLES) = mnumber_of_particles_in_elems(ii);
                //old_element->GetValue(NUMBER_OF_WATER_PARTICLES) = mnumber_of_water_particles_in_elems(ii);
            }
        }
        */

        //after having changed everything we change the status of the modd_timestep flag:
        moffset = post_offset;; //

        KRATOS_CATCH("")
    }

    /// Explicit projection of particle data onto the mesh: accumulates shape-
    /// function-squared weighted particle scalars into mProjectionVar and the
    /// weights into YP, then normalizes per node.
    void TransferLagrangianToEulerian() //explicit
    {
        KRATOS_TRY

        //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
        //const double delta_t =CurrentProcessInfo[DELTA_TIME];
        const double threshold= 0.0/(double(TDim)+1.0);

        std::cout << "projecting info to mesh" << std::endl;

        const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
        //KRATOS_WATCH(offset)
        //(flag managed only by MoveParticles

        //we must project data from the particles (lagrangian) into the eulerian mesh
        //ValuesVectorType eulerian_nodes_old_temperature;
        //int nnodes = mr_model_part.Nodes().size();
        //array_1d<double,(n_nodes)> eulerian_nodes_sumweights;

        //we save data from previous time step of the eulerian mesh in case we must reuse it later cos no particle was found around the nodes
        //though we could've use a bigger buffer, to be changed later!
        //after having saved data, we reset them to zero, this way it's easier to add the contribution of the surrounding particles.
        ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
        vector<unsigned int> node_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

        // reset the nodal accumulators (projection value and summed weights)
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                inode->FastGetSolutionStepValue(mProjectionVar)=0.0;
                inode->FastGetSolutionStepValue(YP)=0.0;
            }
        }

        //adding contribution, loop on elements, since each element has stored the particles found inside of it
        vector<unsigned int> element_partition;
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

        ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;

                array_1d<double,3*(TDim+1)> nodes_positions;
                array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1));
                array_1d<double,(TDim+1)> nodes_addedweights = ZeroVector((TDim+1));
                //array_1d<double,(TDim+1)> weighting_inverse_divisor;

                Geometry<Node<3> >& geom = ielem->GetGeometry();

                // cache nodal coordinates for the repeated CalculatePosition calls
                for (int i=0 ; i!=(TDim+1) ; ++i)
                {
                    nodes_positions[i*3+0]=geom[i].X();
                    nodes_positions[i*3+1]=geom[i].Y();
                    nodes_positions[i*3+2]=geom[i].Z();
                    //weighting_inverse_divisor[i]=1.0/((geom[i].FastGetSolutionStepValue(MEAN_SIZE))*1.01);
                }
                ///KRATOS_WATCH(ielem->Id())
                ///KRATOS_WATCH(ielem->GetValue(NEIGHBOUR_NODES).size());

                //int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_BED_PARTICLES);
                //ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
                int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
                ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];

                for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
                {
                    if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
                        break;

                    Convection_Particle & pparticle = element_particle_pointers[offset+iii];

                    if (pparticle.GetEraseFlag()==false)
                    {
                        array_1d<double,3> & position = pparticle.Coordinates();

                        const float& particle_scalar1 = pparticle.GetScalar1(); // -1 if water, +1 if air

                        array_1d<double,TDim+1> N;
                        bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
                        if (is_found==false) //something went wrong. if it was close enough to the edge we simply send it inside the element.
                        {
                            KRATOS_WATCH(N);
                            for (int j=0 ; j!=(TDim+1); j++)
                                if (N[j]<0.0 && N[j]> -1e-5)
                                    N[j]=1e-10;
                        }

                        for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
                        {
                            //double sq_dist = 0;
                            //these lines for a weighting function based on the distance (or square distance) from the node insteadof the shape functions
                            //for (int k=0 ; k!=(TDim); k++) sq_dist += ((position[k] - nodes_positions[j*3+k])*(position[k] - nodes_positions[j*3+k]));
                            //double weight = (1.0 - (sqrt(sq_dist)*weighting_inverse_divisor[j] ) );

                            double weight=N(j)*N(j);
                            //weight=N(j)*N(j)*N(j);
                            if (weight<threshold) weight=1e-10;
                            if (weight<0.0) {KRATOS_WATCH(weight)}//;weight=0.0;KRATOS_WATCH(velocity);KRATOS_WATCH(N);KRATOS_WATCH(number_of_particles_in_elem);}//{KRATOS_WATCH(weight); KRATOS_WATCH(geom[j].Id()); KRATOS_WATCH(position);}
                            else
                            {
                                nodes_addedweights[j]+= weight;
                                //nodes_addedtemp[j] += weight * particle_temp;
                                nodes_added_scalar1[j] += weight*particle_scalar1;
                            }//
                        }
                    }
                }

                // scatter the elemental accumulators to the nodes under per-node locks
                for (int i=0 ; i!=(TDim+1) ; ++i)
                {
                    geom[i].SetLock();
                    geom[i].FastGetSolutionStepValue(mProjectionVar) +=nodes_added_scalar1[i];
                    geom[i].FastGetSolutionStepValue(YP) +=nodes_addedweights[i];
                    geom[i].UnSetLock();
                }
            }
        }

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;

                double sum_weights = inode->FastGetSolutionStepValue(YP);
                if (sum_weights>0.00001)
                {
                    //inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT)=(inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT))/sum_weights; //resetting the temperature
                    double & height = inode->FastGetSolutionStepValue(mProjectionVar);
                    height /=sum_weights; //resetting the density
                }
                else //this should never happen because other ways to recover the information have been executed before, but leaving it just in case..
                {
                    inode->FastGetSolutionStepValue(mProjectionVar)=inode->FastGetSolutionStepValue(mUnknownVar,1); //resetting the temperature
                }
            }
        }

        KRATOS_CATCH("")
    }

    /// Semi-implicit projection: per element, assembles a small consistent mass
    /// matrix from the particle shape functions, inverts it and blends the result
    /// with a small lumped-mass contribution to limit over/undershoots; then
    /// normalizes per node, as in the explicit version.
    void TransferLagrangianToEulerianImp() //semi implicit
    {
        KRATOS_TRY

        // ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();

        std::cout << "projecting info to mesh (semi implicit)" << std::endl;

        const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
        //KRATOS_WATCH(offset)
        //(flag managed only by MoveParticles

        //we must project data from the particles (lagrangian) into the eulerian mesh
        //ValuesVectorType eulerian_nodes_old_temperature;
        //int nnodes = mr_model_part.Nodes().size();
        //array_1d<double,(n_nodes)> eulerian_nodes_sumweights;

        //we save data from previous time step of the eulerian mesh in case we must reuse it later cos no particle was found around the nodes
        //though we could've use a bigger buffer, to be changed later!
        //after having saved data, we reset them to zero, this way it's easier to add the contribution of the surrounding particles.
        ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
        vector<unsigned int> node_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                inode->FastGetSolutionStepValue(mProjectionVar)=0.0;
                inode->FastGetSolutionStepValue(YP)=0.0;
            }
        }

        //adding contribution, loop on elements, since each element has stored the particles found inside of it
        vector<unsigned int> element_partition;
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

        ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            //creating a matrix for each of the problems.
            BoundedMatrix<double, TDim+1 , TDim+1 > mass_matrix; // WE ONLY NEED ONE! they are the same for all the variables! //_x,mass_matrix_y,mass_matrix_z,mass_matrix_d; //mass matrices for the projected vel (x,y,z) and the distance
            array_1d<double,(TDim+1)> rhs_scalar1;

            array_1d<double,3*(TDim+1)> nodes_positions;
            array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1));
            array_1d<double,(TDim+1)> nodes_addedweights = ZeroVector((TDim+1));

            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;

                nodes_added_scalar1 = ZeroVector((TDim+1)); //resetting vectors
                nodes_addedweights = ZeroVector((TDim+1)); //resetting vectors
                mass_matrix = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices. WE ONLY NEED ONE! they are the same for all the variable. only the rhs changes.
                //mass_matrix_y = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
                //mass_matrix_z = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
                //mass_matrix_d = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
                rhs_scalar1 = ZeroVector((TDim+1)); //resetting vectors

                Geometry<Node<3> >& geom = ielem->GetGeometry();
                const double elem_volume = geom.Area();

                for (int i=0 ; i!=(TDim+1) ; ++i) //saving the nodal positions for faster access
                {
                    nodes_positions[i*3+0]=geom[i].X();
                    nodes_positions[i*3+1]=geom[i].Y();
                    nodes_positions[i*3+2]=geom[i].Z();
                }
                ///KRATOS_WATCH(ielem->Id())
                ///KRATOS_WATCH(ielem->GetValue(NEIGHBOUR_NODES).size());

                //int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_BED_PARTICLES);
                //ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
                int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
                ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];

                for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
                {
                    if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
                        break;

                    Convection_Particle & pparticle = element_particle_pointers[offset+iii];

                    if (pparticle.GetEraseFlag()==false)
                    {
                        array_1d<double,3> & position = pparticle.Coordinates();

                        const float& particle_scalar1 = pparticle.GetScalar1(); // -1 if water, +1 if air

                        array_1d<double,TDim+1> N;
                        bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
                        if (is_found==false) //something went wrong. if it was close enough to the edge we simply send it inside the element.
                        {
                            KRATOS_WATCH(N);
                            for (int j=0 ; j!=(TDim+1); j++)
                                if (N[j]<0.0 && N[j]> -1e-5)
                                    N[j]=1e-10;
                        }

                        for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
                        {
                            double weight=N(j);
                            for (int k=0 ; k!=(TDim+1); k++) //building the mass matrix
                                mass_matrix(j,k) += weight*N(k);

                            rhs_scalar1[j] += weight * double(particle_scalar1);

                            //adding also a part with the lumped mass matrix to reduce overshoots and undershoots
                            if(true)
                            {
                                double this_particle_weight = weight*elem_volume/(double(number_of_particles_in_elem))*0.1; //can be increased or reduced to change the lumped mass contrubtion
                                nodes_addedweights[j]+= this_particle_weight;
                                nodes_added_scalar1[j] += this_particle_weight*particle_scalar1;
                            }
                        }
                    }
                }

                //now we invert the matrix
                BoundedMatrix<double, TDim+1 , TDim+1 > inverse_mass_matrix=ZeroMatrix(TDim+1 , TDim+1);
                if(TDim==3)
                    InvertMatrix( mass_matrix, inverse_mass_matrix);
                else
                    InvertMatrix3x3( mass_matrix, inverse_mass_matrix);
                //and now compute the elemental contribution to the gobal system:
                if(number_of_particles_in_elem>(TDim*3)) //otherwise it's impossible to define a correctly the gradients, therefore the results inside the element are useless.
                {
                    for (int i=0 ; i!=(TDim+1); i++)
                    {
                        for (int j=0 ; j!=(TDim+1); j++)
                        {
                            nodes_added_scalar1[i] += inverse_mass_matrix(i,j)*rhs_scalar1[j]*elem_volume*(1.0/(double(1+TDim)));
                        }
                    }
                    //and also to the mass matrix. LUMPED (but for the contribution of the grandient at elemental level.
for (int i=0 ; i!=(TDim+1); i++) nodes_addedweights[i] += elem_volume*(1.0/(double(1+TDim))); } for (int i=0 ; i!=(TDim+1) ; ++i) { geom[i].SetLock(); geom[i].FastGetSolutionStepValue(mProjectionVar) +=nodes_added_scalar1[i]; geom[i].FastGetSolutionStepValue(YP) +=nodes_addedweights[i]; geom[i].UnSetLock(); } } } #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; double sum_weights = inode->FastGetSolutionStepValue(YP); if (sum_weights>0.00001) { double & scalar1 = inode->FastGetSolutionStepValue(mProjectionVar); scalar1 /=sum_weights; //resetting the density } else //this should never happen because other ways to recover the information have been executed before, but leaving it just in case.. { inode->FastGetSolutionStepValue(mProjectionVar)=inode->FastGetSolutionStepValue(mUnknownVar,1); } } } KRATOS_CATCH("") } void CorrectParticlesWithoutMovingUsingDeltaVariables() { KRATOS_TRY //std::cout << "updating particles" << std::endl; //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones. 
    //(flag managed only by MoveParticles)
    //KRATOS_WATCH(offset)
    ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

    vector<unsigned int> element_partition;
    #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
    #else
        int number_of_threads = 1;
    #endif
    OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

    #pragma omp parallel for
    for(int kkk=0; kkk<number_of_threads; kkk++)
    {
        for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
        {
            //const int & elem_id = ielem->Id();
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            Element::Pointer pelement(*ielem.base());
            Geometry<Node<3> >& geom = ielem->GetGeometry();
            //ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
            //int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_BED_PARTICLES);
            int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
            ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];
            //std::cout << "elem " << ii << " with " << (unsigned int)number_of_particles_in_elem << " particles" << std::endl;
            for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
            {
                //KRATOS_WATCH(iii)
                if (iii>mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
                    break;
                Convection_Particle & pparticle = element_particle_pointers[offset+iii];
                bool erase_flag= pparticle.GetEraseFlag();
                if (erase_flag==false)
                {
                    CorrectParticleUsingDeltaVariables(pparticle,pelement,geom); //'lite' version, we pass by reference the geometry, so much cheaper
                }
            }
        }
    }
    KRATOS_CATCH("")
}

//**************************************************************************************************************
//**************************************************************************************************************

///Appends `candidate` to `v` only if no entry with the same Id is already present
///(linear search over the weak-pointer vector).
template< class TDataType >
void AddUniqueWeakPointer(WeakPointerVector< TDataType >& v, const typename TDataType::WeakPointer candidate)
{
    typename WeakPointerVector< TDataType >::iterator i = v.begin();
    typename WeakPointerVector< TDataType >::iterator endit = v.end();
    while ( i != endit && (i)->Id() != (candidate.lock())->Id())
    {
        i++;
    }
    if( i == endit )
    {
        v.push_back(candidate);
    }
}

//**************************************************************************************************************
//**************************************************************************************************************

///Reseeds particles (before moving them) in elements that have fewer than
///`minimum_number_of_particles`: new particles are placed at interior Gauss points
///and their scalar is obtained by back-tracking the streamline (MoveParticle_inverse_way).
void PreReseed(int minimum_number_of_particles)
{
    KRATOS_TRY
    //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    const int offset =moffset;
    const int max_results = 1000;

    //tools for the parallelization
    unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
    vector<unsigned int> elem_partition;
    int number_of_rows=mr_model_part.Elements().size();
    elem_partition.resize(number_of_threads + 1);
    int elem_partition_size = number_of_rows / number_of_threads;
    elem_partition[0] = 0;
    elem_partition[number_of_threads] = number_of_rows;
    //KRATOS_WATCH(elem_partition_size);
    for (unsigned int i = 1; i < number_of_threads; i++)
        elem_partition[i] = elem_partition[i - 1] + elem_partition_size;

    ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
    #pragma omp parallel firstprivate(elem_partition)
    {
ResultContainerType results(max_results); int k = OpenMPUtils::ThisThread(); //ModelPart::ElementsContainerType::iterator it_begin = mr_model_part.ElementsBegin() + elem_partition[k]; //ModelPart::ElementsContainerType::iterator it_end = mr_model_part.ElementsBegin() + elem_partition[k+1] ; //ModelPart::NodesContainerType local_list=aux[k]; //PointerVectorSet<Convection_Particle, IndexedObject> & list=aux[k]; //KRATOS_WATCH(k); BoundedMatrix<double, (TDim+1), 3 > pos; BoundedMatrix<double, (TDim+1) , (TDim+1) > N; unsigned int freeparticle=0; //we start with the first position in the particles array //int local_id=1; for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++) { //const int & elem_id = ielem->Id(); ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; results.resize(max_results); //const int & elem_id = ielem->Id(); //ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS)); //int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_BED_PARTICLES); int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii]; ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii]; if (number_of_particles_in_elem<(minimum_number_of_particles))// && (ielem->GetGeometry())[0].Y()<0.10 ) { //KRATOS_WATCH("elem with little particles") Geometry< Node<3> >& geom = ielem->GetGeometry(); ComputeGaussPointPositionsForPreReseed(geom, pos, N); //double conductivity = ielem->GetProperties()[CONDUCTIVITY]; //KRATOS_WATCH(conductivity); for (unsigned int j = 0; j < (pos.size1()); j++) //i am dropping the last one, the one in the middle of the element { bool keep_looking = true; while(keep_looking) { if (mparticles_vector[freeparticle].GetEraseFlag()==true) { #pragma omp critical { if (mparticles_vector[freeparticle].GetEraseFlag()==true) { mparticles_vector[freeparticle].GetEraseFlag()=false; keep_looking=false; } } if (keep_looking==false) break; else freeparticle++; } else { 
                            freeparticle++;
                        }
                    }
                    Convection_Particle pparticle(pos(j,0),pos(j,1),pos(j,2));
                    array_1d<double,TDim+1>aux2_N;
                    bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux2_N);
                    if (is_found==false) //should not happen: the seeding point is built inside the element
                    {
                        KRATOS_WATCH(aux2_N);
                    }
                    pparticle.GetEraseFlag()=false;
                    ResultIteratorType result_begin = results.begin();
                    Element::Pointer pelement( *ielem.base() ); //local copy so the back-tracking does not overwrite the element iterator
                    MoveParticle_inverse_way(pparticle, pelement, result_begin, max_results);
                    //and we copy it to the array:
                    mparticles_vector[freeparticle] = pparticle;
                    element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle];
                    pparticle.GetEraseFlag()=false;
                    number_of_particles_in_elem++;
                }
            }
        }
    }
    KRATOS_CATCH("")
}

//**************************************************************************************************************
//**************************************************************************************************************

///Reseeds particles (after moving them) in elements that still have fewer than
///`minimum_number_of_particles`: new particles are placed at 3+2*TDim points
///and their scalar is interpolated from the current mesh solution (no back-tracking).
void PostReseed(int minimum_number_of_particles) //pooyan's way
{
    KRATOS_TRY
    //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    const int offset = moffset;

    //TOOLS FOR THE PARALLELIZATION
    //int last_id= (mr_linea_model_part.NodesEnd()-1)->Id();
    unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
    //KRATOS_WATCH(number_of_threads);
    vector<unsigned int> elem_partition;
    int number_of_rows=mr_model_part.Elements().size();
    //KRATOS_WATCH(number_of_threads);
    //KRATOS_THROW_ERROR(std::logic_error, "Add ----NODAL_H---- variable!!!!!! ERROR", "");
    elem_partition.resize(number_of_threads + 1);
    int elem_partition_size = number_of_rows / number_of_threads;
    elem_partition[0] = 0;
    elem_partition[number_of_threads] = number_of_rows;
    //KRATOS_WATCH(elem_partition_size);
    for (unsigned int i = 1; i < number_of_threads; i++)
        elem_partition[i] = elem_partition[i - 1] + elem_partition_size;
    //typedef Node < 3 > PointType;
    //std::vector<ModelPart::NodesContainerType> aux;// aux;
    //aux.resize(number_of_threads);
    //ModelPart::NodesContainerType::iterator it_begin_particle_model_part = mr_linea_model_part.NodesBegin();
    //ModelPart::NodesContainerType::iterator it_end_particle_model_part = mr_linea_model_part.NodesEnd();

    ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
    #pragma omp parallel firstprivate(elem_partition) // firstprivate(results)//we will add the nodes in different parts of aux and later assemble everything together, renaming particle ids to get consecutive ids
    {
        unsigned int reused_particles=0;
        unsigned int freeparticle = 0; //we start by the first position;
        int k = OpenMPUtils::ThisThread();
        //ModelPart::ElementsContainerType::iterator it_begin = mr_model_part.ElementsBegin() + elem_partition[k];
        //ModelPart::ElementsContainerType::iterator it_end = mr_model_part.ElementsBegin() + elem_partition[k+1] ;
        BoundedMatrix<double, (3+2*TDim), 3 > pos; //7 particles (2D) or 9 particles (3D)
        BoundedMatrix<double, (3+2*TDim), (TDim+1) > N;
        double mesh_scalar1;
        array_1d<int, (3+2*TDim) > positions;
        unsigned int number_of_reseeded_particles;
        //unsigned int number_of_water_reseeded_particles;
        //array_1d<double, 3 > nodes_distances;
        for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
        {
            //const int & elem_id = ielem->Id();
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            //int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_BED_PARTICLES);
            //ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
            int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
            ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];
            Geometry< Node<3> >& geom = ielem->GetGeometry();
            if ( (number_of_particles_in_elem<(minimum_number_of_particles)))// && (geom[0].Y()<0.10) ) || (number_of_water_particles_in_elem>2 && number_of_particles_in_elem<(minimum_number_of_particles) ) )
            {
                //bool reseed_more=false;
                number_of_reseeded_particles=0;
                //reseed_more=true;
                number_of_reseeded_particles= 3+2*TDim;
                ComputeGaussPointPositionsForPostReseed(geom, pos, N);
                for (unsigned int j = 0; j < number_of_reseeded_particles; j++)
                {
                    //now we have to find an empty space ( a particle that was about to be deleted) in the particles model part. once found. there will be our renewed particle:
                    bool keep_looking = true;
                    while(keep_looking)
                    {
                        if (mparticles_vector[freeparticle].GetEraseFlag()==true)
                        {
                            #pragma omp critical //re-check under the critical section: another thread may have claimed this slot
                            {
                                if (mparticles_vector[freeparticle].GetEraseFlag()==true)
                                {
                                    mparticles_vector[freeparticle].GetEraseFlag()=false;
                                    keep_looking=false;
                                }
                            }
                            if (keep_looking==false)
                                break;
                            else
                                freeparticle++;
                        }
                        else
                        {
                            freeparticle++;
                        }
                    }
                    Convection_Particle pparticle(pos(j,0),pos(j,1),pos(j,2));
                    array_1d<double,TDim+1>aux_N;
                    bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux_N);
                    if (is_found==false) //should not happen: the seeding point is built inside the element
                    {
                        KRATOS_WATCH(aux_N);
                        KRATOS_WATCH(j)
                        KRATOS_WATCH(ielem->Id())
                    }
                    //interpolate the mesh scalar at the new particle position
                    mesh_scalar1 = 0.0;
                    for (unsigned int l = 0; l < (TDim+1); l++)
                    {
                        mesh_scalar1 += N(j,l) * geom[l].FastGetSolutionStepValue(mUnknownVar);
                    }
                    pparticle.GetScalar1()=mesh_scalar1;
                    pparticle.GetEraseFlag()=false;
                    mparticles_vector[freeparticle]=pparticle;
                    element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle];
                    number_of_particles_in_elem++;
                    //NOTE(review): keep_looking is necessarily false here (the while above only exits
                    //with it false), so this throw is dead code; an exhausted array would instead
                    //overrun mparticles_vector in the loop above — TODO confirm and guard there.
                    if (keep_looking)
                    {
                        KRATOS_THROW_ERROR(std::logic_error, "FINISHED THE LIST AND COULDNT FIND A FREE CELL FOR THE NEW PARTICLE!", "");
                    }
                    else
                    {
                        reused_particles++;
                    }
                }
} } } KRATOS_CATCH("") } void ExecuteParticlesPritingTool( ModelPart& lagrangian_model_part, int input_filter_factor ) { KRATOS_TRY //mfilter_factor; //we will only print one out of every "filter_factor" particles of the total particle list if(mparticle_printing_tool_initialized==false) { mfilter_factor=input_filter_factor; if(lagrangian_model_part.NodesBegin()-lagrangian_model_part.NodesEnd()>0) KRATOS_THROW_ERROR(std::logic_error, "AN EMPTY MODEL PART IS REQUIRED FOR THE PRINTING OF PARTICLES", ""); lagrangian_model_part.AddNodalSolutionStepVariable(DISPLACEMENT); lagrangian_model_part.AddNodalSolutionStepVariable(mUnknownVar); for (unsigned int i=0; i!=((mmaximum_number_of_particles*mnelems)/mfilter_factor)+mfilter_factor; i++) { Node < 3 > ::Pointer pnode = lagrangian_model_part.CreateNewNode( i+mlast_node_id+1 , 0.0, 0.0, 0.0); //recordar que es el nueevo model part!! //pnode->SetBufferSize(mr_model_part.NodesBegin()->GetBufferSize()); pnode->SetBufferSize(1); } mparticle_printing_tool_initialized=true; } //resetting data of the unused particles const double inactive_particle_position= -10.0; array_1d<double,3>inactive_particle_position_vector; inactive_particle_position_vector(0)=inactive_particle_position; inactive_particle_position_vector(1)=inactive_particle_position; inactive_particle_position_vector(2)=inactive_particle_position; ModelPart::NodesContainerType::iterator inodebegin = lagrangian_model_part.NodesBegin(); for(unsigned int ii=0; ii<lagrangian_model_part.Nodes().size(); ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->FastGetSolutionStepValue(mUnknownVar) = 0.0; inode->FastGetSolutionStepValue(DISPLACEMENT) = inactive_particle_position_vector; } int counter=0; //ModelPart::NodesContainerType::iterator it_begin = lagrangian_model_part.NodesBegin(); for (int i=0; i!=mmaximum_number_of_particles*mnelems; i++) { Convection_Particle& pparticle =mparticles_vector[i]; if(pparticle.GetEraseFlag()==false && 
i%mfilter_factor==0) { ModelPart::NodesContainerType::iterator inode = inodebegin+counter; //copying info from the particle to the (printing) node. inode->FastGetSolutionStepValue(mUnknownVar) = pparticle.GetScalar1(); inode->FastGetSolutionStepValue(DISPLACEMENT) = pparticle.Coordinates(); counter++; } } KRATOS_CATCH("") } protected: private: ///this function moves a particle according to the "velocity" given ///by "rVariable". The movement is performed in nsubsteps, during a total time ///of Dt void MoveParticle( Convection_Particle & pparticle, Element::Pointer & pelement, WeakPointerVector< Element >& elements_in_trajectory, unsigned int & number_of_elements_in_trajectory, ResultIteratorType result_begin, const unsigned int MaxNumberOfResults) { ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); double delta_t = CurrentProcessInfo[DELTA_TIME]; unsigned int nsubsteps; double substep_dt; bool KEEP_INTEGRATING=false; bool is_found; //bool have_air_node; //bool have_water_node; array_1d<double,3> vel; array_1d<double,3> vel_without_other_phase_nodes=ZeroVector(3); array_1d<double,3> position; array_1d<double,3> mid_position; array_1d<double,TDim+1> N; //we start with the first position, then it will enter the loop. 
    position = pparticle.Coordinates(); //initial coordinates
    double only_integral = 0.0 ;

    is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //good, now we know where this point is:
    if(is_found == true)
    {
        KEEP_INTEGRATING=true;
        Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
        vel=ZeroVector(3);
        //interpolate the convecting velocity at the particle position
        for(unsigned int j=0; j<(TDim+1); j++)
        {
            noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j];
        }
        //calculating substep to get +- courant(substep) = 0.1
        nsubsteps = 10.0 * (delta_t * pelement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
        if (nsubsteps<1)
            nsubsteps=1;
        substep_dt = delta_t / double(nsubsteps);

        only_integral = 1.0;// weight;//*double(nsubsteps);
        position += vel*substep_dt;//weight;

        //DONE THE FIRST LOCATION OF THE PARTICLE, NOW WE PROCEED TO STREAMLINE INTEGRATION USING THE MESH SEDIMENT_VELOCITY
        //////////////////////////////////////////////////////////////////////////////////////////////////////
        unsigned int check_from_element_number=0;
        for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
        {
            if (KEEP_INTEGRATING==true)
            {
                is_found = FindNodeOnMesh(position, N ,pelement,elements_in_trajectory,number_of_elements_in_trajectory,check_from_element_number,result_begin,MaxNumberOfResults); //good, now we know where this point is:
                if(is_found == true)
                {
                    Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
                    vel = ZeroVector(3);
                    for(unsigned int j=0; j<(TDim+1); j++)
                    {
                        noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j];
                    }
                    only_integral += 1.0; //values saved for the current time step
                    position+=vel*substep_dt;//weight;
                }
                else
                {
                    KEEP_INTEGRATING=false; //particle left the domain, stop the streamline integration
                    break;
                }
            }
            else
                break;
        }
    }

    if (KEEP_INTEGRATING==false)
        (pparticle.GetEraseFlag()=true); //particle left the domain: mark it for deletion
    else
        is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //we must save the pointer of the last element that we're in (inside the pointervector pelement)

    if (is_found==false) ( pparticle.GetEraseFlag()=true);
    pparticle.Coordinates() = position;
}

///Adds the nodal increment DELTA_SCALAR1, interpolated at the particle position,
///to the particle's scalar. The geometry is passed by reference to avoid refetching it.
void CorrectParticleUsingDeltaVariables( Convection_Particle & pparticle,
                                         Element::Pointer & pelement,
                                         Geometry< Node<3> >& geom)
{
    array_1d<double,TDim+1> N;

    //we start with the first position, then it will enter the loop.
    array_1d<double,3> coords = pparticle.Coordinates();
    float & particle_scalar1 = pparticle.GetScalar1();
    //double distance=0.0;
    double delta_scalar1 = 0.0;

    bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
    if(is_found == false) //particle slightly outside the element: log and clamp negative shape functions
    {
        KRATOS_WATCH(N)
        for (int j=0 ; j!=(TDim+1); j++)
            if (N[j]<0.0 )
                N[j]=1e-10;
    }

    for(unsigned int j=0; j<(TDim+1); j++)
    {
        delta_scalar1 += geom[j].FastGetSolutionStepValue(DELTA_SCALAR1)*N[j];
    }
    particle_scalar1 = particle_scalar1 + delta_scalar1;
}

///Back-tracks a (freshly seeded) particle along the streamline to find the scalar it
///should carry, interpolated from the mesh at the departure point.
///NOTE(review): the comment below says "NOT A REFERENCE" but the signature takes
///Element::Pointer by reference; callers (e.g. PreReseed) pass a local copy — TODO confirm intent.
void MoveParticle_inverse_way( Convection_Particle & pparticle,
                               Element::Pointer & pelement, //NOT A REFERENCE!! WE SHALL NOT OVERWRITE THE ELEMENT IT BELONGS TO!
                               ResultIteratorType result_begin,
                               const unsigned int MaxNumberOfResults)
{
    ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    double delta_t = CurrentProcessInfo[DELTA_TIME];
    unsigned int nsubsteps;
    double substep_dt;

    bool KEEP_INTEGRATING=false;
    bool is_found;

    array_1d<double,3> vel;
    array_1d<double,3> position;
    array_1d<double,3> mid_position;
    array_1d<double,TDim+1> N;
    double scalar1 = 0.0;

    //we start with the first position, then it will enter the loop.
    position = pparticle.Coordinates(); // + (pparticle)->FastGetSolutionStepValue(DISPLACEMENT); //initial coordinates

    double only_integral = 0.0 ;
    is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //good, now we know where this point is:
    if(is_found == true)
    {
        KEEP_INTEGRATING=true;
        Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
        vel=ZeroVector(3);
        scalar1=0.0;
        //interpolate both the transported scalar and the velocity at the particle position
        for(unsigned int j=0; j<(TDim+1); j++)
        {
            scalar1 += geom[j].FastGetSolutionStepValue(mUnknownVar)*N(j);
            noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j];
        }
        //calculating substep to get +- courant(substep) = 1/4
        nsubsteps = 10.0 * (delta_t * pelement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
        if (nsubsteps<1)
            nsubsteps=1;
        substep_dt = delta_t / double(nsubsteps);

        only_integral = 1.0;// weight;//*double(nsubsteps);
        position -= vel*substep_dt;//weight; //note the MINUS sign: we integrate backwards in time

        for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
        {
            if (KEEP_INTEGRATING==true)
            {
                is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //good, now we know where this point is:
                if(is_found == true)
                {
                    Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
                    vel=ZeroVector(3);
                    scalar1=0.0;
                    for(unsigned int j=0; j<(TDim+1); j++)
                    {
                        noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j] ;
                        scalar1 += geom[j].FastGetSolutionStepValue(mUnknownVar)*N(j);
                    }
                    only_integral += 1.0;//weight ; //values saved for the current time step
                    position-=vel*substep_dt;//weight;
                }
                else
                    KEEP_INTEGRATING=false; //left the domain: keep the last successfully interpolated scalar
            }
        }
        pparticle.GetScalar1()=scalar1; //only assigned if the very first location succeeded
    }
    //else {KRATOS_WATCH(position); }
}

///this function should find the element into which a given node is located
///and return a pointer to the element and the vector containing the
///shape functions that define the position within the element
///if "false" is devolved the element is not found
bool FindNodeOnMesh( array_1d<double,3>& position,
                     array_1d<double,TDim+1>& N,
                     Element::Pointer & pelement,
                     ResultIteratorType result_begin,
                     const unsigned int MaxNumberOfResults)
{
    typedef std::size_t SizeType;

    const array_1d<double,3>& coords = position;
    array_1d<double,TDim+1> aux_N; //NOTE(review): unused in this overload

    //before using the bin to search for possible elements we check first the last element in which the particle was.
    Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry();
    bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N);
    if(is_found_1 == true) //that was easy!
    {
        return true;
    }

    //to begin with we check the neighbour elements; it is a bit more expensive
    WeakPointerVector< Element >& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS);
    //the first we check is the one that has negative shape function, because it means it went outside in this direction:
    //commented, it is not faster than simply checking all the neighbours (branching)
    /*
    unsigned int checked_element=0;
    for (unsigned int i=0;i!=(TDim+1);i++)
    {
        if (N[i]<0.0)
        {
            checked_element=i;
            Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
            bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
            if (is_found_2)
            {
                pelement=Element::Pointer(((neighb_elems(i))));
                N=aux_N;
                return true;
            }
            break;
        }
    }
    */
    //we check all the neighbour elements
    for (unsigned int i=0;i!=(neighb_elems.size());i++)
    {
        Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
        bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
        if (is_found_2)
        {
            pelement=Element::Pointer(((neighb_elems(i))));
            return true;
        }
    }

    //if checking all the neighbour elements did not work, we have to use the bins
    //ask to the container for the list of candidate elements
    SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults );
    if(results_found>0)
    {
        //loop over the candidate elements and check if the particle falls within
        for(SizeType i = 0; i< results_found; i++)
        {
            Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry();
            //find local position
            bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
            if(is_found == true)
            {
                pelement=Element::Pointer((*(result_begin+i)));
                return true;
            }
        }
    }
    //if nothing worked, then:
    //not found case
    return false;
}

// VERSION INCLUDING PREDEFINED ELEMENTS FOLLOWING A TRAJECTORY
///Same as the overload above, but first checks the cached list of elements crossed
///by previous particles that started from the same element (`elements_in_trajectory`),
///and appends newly discovered elements to that list (up to 20).
bool FindNodeOnMesh( array_1d<double,3>& position,
                     array_1d<double,TDim+1>& N,
                     Element::Pointer & pelement,
                     WeakPointerVector< Element >& elements_in_trajectory,
                     unsigned int & number_of_elements_in_trajectory,
                     unsigned int & check_from_element_number,
                     ResultIteratorType result_begin,
                     const unsigned int MaxNumberOfResults)
{
    typedef std::size_t SizeType;

    const array_1d<double,3>& coords = position;
    array_1d<double,TDim+1> aux_N;
    //before using the bin to search for possible elements we check first the last element in which the particle was.
    Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry();
    bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N);
    if(is_found_1 == true)
    {
        return true; //that was easy!
    }

    //if it was not found in the first element, we can proceed to check in the following elements (in the trajectory defined by previous particles that started from the same element.
    for (unsigned int i=(check_from_element_number);i!=number_of_elements_in_trajectory;i++)
    {
        Geometry<Node<3> >& geom = elements_in_trajectory[i].GetGeometry();
        bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
        if (is_found_2)
        {
            pelement=Element::Pointer(((elements_in_trajectory(i))));
            N=aux_N;
            check_from_element_number = i+1 ; //now i element matches pelement, so to avoid checking twice the same element we send the counter to the following element.
            return true;
        }
    }

    //now we check the neighbour elements:
    WeakPointerVector< Element >& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS);
    //the first we check is the one that has negative shape function, because it means it went outside in this direction:
    //commented, it is not faster than simply checking all the neighbours (branching)
    /*
    unsigned int checked_element=0;
    for (unsigned int i=0;i!=(TDim+1);i++)
    {
        if (N[i]<0.0)
        {
            checked_element=i;
            Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
            bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
            if (is_found_2)
            {
                pelement=Element::Pointer(((neighb_elems(i))));
                N=aux_N;
                return true;
            }
            break;
        }
    }
    */
    //we check all the neighbour elements
    for (unsigned int i=0;i!=(neighb_elems.size());i++)
    {
        Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
        bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
        if (is_found_2)
        {
            pelement=Element::Pointer(((neighb_elems(i))));
            if (number_of_elements_in_trajectory<20) //cache the element for later particles following the same path
            {
                elements_in_trajectory(number_of_elements_in_trajectory)=pelement;
                number_of_elements_in_trajectory++;
                check_from_element_number = number_of_elements_in_trajectory; //we do it after doing the ++ to the counter, so we wouldn't enter the loop that searches in the elements_in_trajectory list. we are the particle that is adding elements to the list
            }
            return true;
        }
    }

    //if checking all the neighbour elements did not work, we have to use the bins
    //ask to the container for the list of candidate elements
    SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults );
    if(results_found>0)
    {
        //loop over the candidate elements and check if the particle falls within
        for(SizeType i = 0; i< results_found; i++)
        {
            Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry();
            //find local position
            bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
            if(is_found == true)
            {
                pelement=Element::Pointer((*(result_begin+i)));
                if (number_of_elements_in_trajectory<20) //cache the element for later particles following the same path
                {
                    elements_in_trajectory(number_of_elements_in_trajectory)=pelement;
                    number_of_elements_in_trajectory++;
                    check_from_element_number = number_of_elements_in_trajectory; //we do it after doing the ++ to the counter, so we wouldn't enter the loop that searches in the elements_in_trajectory list. we are the particle that is adding elements to the list
                }
                return true;
            }
        }
    }

    //not found case
    return false;
}

//***************************************
//***************************************

///2D version: computes the barycentric (area) coordinates of (xc,yc) in the triangle
///`geom` and returns true if the point lies inside. `zc` is ignored.
inline bool CalculatePosition(Geometry<Node < 3 > >&geom,
                              const double xc, const double yc, const double zc,
                              array_1d<double, 3 > & N
                             )
{
    double x0 = geom[0].X();
    double y0 = geom[0].Y();
    double x1 = geom[1].X();
    double y1 = geom[1].Y();
    double x2 = geom[2].X();
    double y2 = geom[2].Y();

    double area = CalculateVol(x0, y0, x1, y1, x2, y2);
    double inv_area = 0.0;
    if (area == 0.0)
    {
        KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", "");
    }
    else
    {
        inv_area = 1.0 / area;
    }

    N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
    N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
    N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;
    //KRATOS_WATCH(N);
    if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true
        return true;

    return false;
}

////////////
//using the pre loaded nodal coordinates
///2D version taking a flat array of nodal coordinates (x,y,z per node) instead of a Geometry.
inline bool CalculatePosition(const array_1d<double,3*(TDim+1)>& nodes_positions,
                              const double xc, const double yc, const double zc,
                              array_1d<double, 3 > & N
                             )
{
    const double& x0 = nodes_positions[0];
    const double& y0 = nodes_positions[1];
    const double& x1 = nodes_positions[3];
    const double& y1 = nodes_positions[4];
    const double& x2 = nodes_positions[6];
    const double& y2 = nodes_positions[7];

    double area = CalculateVol(x0, y0, x1, y1, x2, y2);
    double inv_area = 0.0;
    if (area == 0.0)
    {
        KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", "");
    }
    else
    {
        inv_area = 1.0 / area;
    }

    N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
    N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
    N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;
    //KRATOS_WATCH(N);
    if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true
        return true;

    return false;
}

//***************************************
//***************************************

///3D version: computes the barycentric (volume) coordinates of (xc,yc,zc) in the
///tetrahedron `geom` and returns true if the point lies inside.
inline bool CalculatePosition(Geometry<Node < 3 > >&geom,
                              const double xc, const double yc, const double zc,
                              array_1d<double, 4 > & N
                             )
{
    double x0 = geom[0].X();
    double y0 = geom[0].Y();
    double z0 = geom[0].Z();
    double x1 = geom[1].X();
    double y1 = geom[1].Y();
    double z1 = geom[1].Z();
    double x2 = geom[2].X();
    double y2 = geom[2].Y();
    double z2 = geom[2].Z();
    double x3 = geom[3].X();
    double y3 = geom[3].Y();
    double z3 = geom[3].Z();

    double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
    double inv_vol = 0.0;
    if (vol < 0.000000000000000000000000000001) //near-zero (or inverted) tetrahedron
    {
        KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", "");
    }
    else
    {
        inv_vol = 1.0 / vol;
    }

    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;

    if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0) //if the xc yc zc is inside the tetrahedron return true
        return true;

    return false;
}

///////////////////
//using the pre loaded nodal coordinates
///3D version taking a flat array of nodal coordinates (x,y,z per node) instead of a Geometry.
inline bool CalculatePosition(const array_1d<double,3*(TDim+1)>& nodes_positions,
                              const double xc, const double yc, const double zc,
                              array_1d<double, 4 > & N
                             )
{
    const double& x0 = nodes_positions[0];
    const double& y0 = nodes_positions[1];
    const double& z0 = nodes_positions[2];
    const double& x1 = nodes_positions[3];
    const double& y1 = nodes_positions[4];
    const double& z1 = nodes_positions[5];
    const double& x2 = nodes_positions[6];
    const double& y2 = nodes_positions[7];
    const double& z2 = nodes_positions[8];
    const double& x3 = nodes_positions[9];
    const double& y3 = nodes_positions[10];
const double& z3 = nodes_positions[11]; double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3); double inv_vol = 0.0; if (vol < 0.000000000000000000000000000001) { KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", ""); } else { inv_vol = 1.0 / vol; } N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol; N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol; N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol; N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol; if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0) //if the xc yc zc is inside the tetrahedron return true return true; return false; } inline double CalculateVol(const double x0, const double y0, const double x1, const double y1, const double x2, const double y2 ) { return 0.5 * ((x1 - x0)*(y2 - y0)- (y1 - y0)*(x2 - x0)); } //*************************************** //*************************************** inline double CalculateVol(const double x0, const double y0, const double z0, const double x1, const double y1, const double z1, const double x2, const double y2, const double z2, const double x3, const double y3, const double z3 ) { double x10 = x1 - x0; double y10 = y1 - y0; double z10 = z1 - z0; double x20 = x2 - x0; double y20 = y2 - y0; double z20 = z2 - z0; double x30 = x3 - x0; double y30 = y3 - y0; double z30 = z3 - z0; double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30; return detJ * 0.1666666666666666666667; } void ComputeGaussPointPositions_4(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos,BoundedMatrix<double, 7, 3 > & N) { double one_third = 1.0 / 3.0; double one_sixt = 0.15; //1.0 / 6.0; double two_third = 0.7; //2.0 * one_third; N(0, 0) = one_sixt; N(0, 1) = one_sixt; N(0, 2) = two_third; N(1, 0) = 
two_third; N(1, 1) = one_sixt; N(1, 2) = one_sixt; N(2, 0) = one_sixt; N(2, 1) = two_third; N(2, 2) = one_sixt; N(3, 0) = one_third; N(3, 1) = one_third; N(3, 2) = one_third; //first pos(0, 0) = one_sixt * geom[0].X() + one_sixt * geom[1].X() + two_third * geom[2].X(); pos(0, 1) = one_sixt * geom[0].Y() + one_sixt * geom[1].Y() + two_third * geom[2].Y(); pos(0, 2) = one_sixt * geom[0].Z() + one_sixt * geom[1].Z() + two_third * geom[2].Z(); //second pos(1, 0) = two_third * geom[0].X() + one_sixt * geom[1].X() + one_sixt * geom[2].X(); pos(1, 1) = two_third * geom[0].Y() + one_sixt * geom[1].Y() + one_sixt * geom[2].Y(); pos(1, 2) = two_third * geom[0].Z() + one_sixt * geom[1].Z() + one_sixt * geom[2].Z(); //third pos(2, 0) = one_sixt * geom[0].X() + two_third * geom[1].X() + one_sixt * geom[2].X(); pos(2, 1) = one_sixt * geom[0].Y() + two_third * geom[1].Y() + one_sixt * geom[2].Y(); pos(2, 2) = one_sixt * geom[0].Z() + two_third * geom[1].Z() + one_sixt * geom[2].Z(); //fourth pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X(); pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y(); pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z(); } void ComputeGaussPointPositionsForPostReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos,BoundedMatrix<double, 7, 3 > & N) //2d { double one_third = 1.0 / 3.0; double one_eight = 0.12; //1.0 / 6.0; double three_quarters = 0.76; //2.0 * one_third; N(0, 0) = one_eight; N(0, 1) = one_eight; N(0, 2) = three_quarters; N(1, 0) = three_quarters; N(1, 1) = one_eight; N(1, 2) = one_eight; N(2, 0) = one_eight; N(2, 1) = three_quarters; N(2, 2) = one_eight; N(3, 0) = one_third; N(3, 1) = one_third; N(3, 2) = one_third; N(4, 0) = one_eight; N(4, 1) = 0.44; N(4, 2) = 0.44; N(5, 0) = 0.44; N(5, 1) = one_eight; N(5, 2) = 0.44; N(6, 0) = 0.44; N(6, 1) = 0.44; N(6, 2) = one_eight; //first pos(0, 0) = one_eight * geom[0].X() 
+ one_eight * geom[1].X() + three_quarters * geom[2].X(); pos(0, 1) = one_eight * geom[0].Y() + one_eight * geom[1].Y() + three_quarters * geom[2].Y(); pos(0, 2) = one_eight * geom[0].Z() + one_eight * geom[1].Z() + three_quarters * geom[2].Z(); //second pos(1, 0) = three_quarters * geom[0].X() + one_eight * geom[1].X() + one_eight * geom[2].X(); pos(1, 1) = three_quarters * geom[0].Y() + one_eight * geom[1].Y() + one_eight * geom[2].Y(); pos(1, 2) = three_quarters * geom[0].Z() + one_eight * geom[1].Z() + one_eight * geom[2].Z(); //third pos(2, 0) = one_eight * geom[0].X() + three_quarters * geom[1].X() + one_eight * geom[2].X(); pos(2, 1) = one_eight * geom[0].Y() + three_quarters * geom[1].Y() + one_eight * geom[2].Y(); pos(2, 2) = one_eight * geom[0].Z() + three_quarters * geom[1].Z() + one_eight * geom[2].Z(); //fourth pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X(); pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y(); pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z(); //fifth pos(4, 0) = one_eight * geom[0].X() + 0.44 * geom[1].X() + 0.44 * geom[2].X(); pos(4, 1) = one_eight * geom[0].Y() + 0.44 * geom[1].Y() + 0.44 * geom[2].Y(); pos(4, 2) = one_eight * geom[0].Z() + 0.44 * geom[1].Z() + 0.44 * geom[2].Z(); //sixth pos(5, 0) = 0.44 * geom[0].X() + one_eight * geom[1].X() + 0.44 * geom[2].X(); pos(5, 1) = 0.44 * geom[0].Y() + one_eight * geom[1].Y() + 0.44 * geom[2].Y(); pos(5, 2) = 0.44 * geom[0].Z() + one_eight * geom[1].Z() + 0.44 * geom[2].Z(); //seventh pos(6, 0) = 0.44 * geom[0].X() + 0.44 * geom[1].X() + one_eight * geom[2].X(); pos(6, 1) = 0.44 * geom[0].Y() + 0.44 * geom[1].Y() + one_eight * geom[2].Y(); pos(6, 2) = 0.44 * geom[0].Z() + 0.44 * geom[1].Z() + one_eight * geom[2].Z(); } void ComputeGaussPointPositionsForPostReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 9, 3 > & pos,BoundedMatrix<double, 9, 4 > & N) //3D { double 
one_quarter = 0.25; double small_fraction = 0.1; //1.0 / 6.0; double big_fraction = 0.7; //2.0 * one_third; double mid_fraction = 0.3; //2.0 * one_third; N(0, 0) = big_fraction; N(0, 1) = small_fraction; N(0, 2) = small_fraction; N(0, 3) = small_fraction; N(1, 0) = small_fraction; N(1, 1) = big_fraction; N(1, 2) = small_fraction; N(1, 3) = small_fraction; N(2, 0) = small_fraction; N(2, 1) = small_fraction; N(2, 2) = big_fraction; N(2, 3) = small_fraction; N(3, 0) = small_fraction; N(3, 1) = small_fraction; N(3, 2) = small_fraction; N(3, 3) = big_fraction; N(4, 0) = one_quarter; N(4, 1) = one_quarter; N(4, 2) = one_quarter; N(4, 3) = one_quarter; N(5, 0) = small_fraction; N(5, 1) = mid_fraction; N(5, 2) = mid_fraction; N(5, 3) = mid_fraction; N(6, 0) = mid_fraction; N(6, 1) = small_fraction; N(6, 2) = mid_fraction; N(6, 3) = mid_fraction; N(7, 0) = mid_fraction; N(7, 1) = mid_fraction; N(7, 2) = small_fraction; N(7, 3) = mid_fraction; N(8, 0) = mid_fraction; N(8, 1) = mid_fraction; N(8, 2) = mid_fraction; N(8, 3) = small_fraction; pos=ZeroMatrix(9,3); for (unsigned int i=0; i!=4; i++) //going through the 4 nodes { array_1d<double, 3 > & coordinates = geom[i].Coordinates(); for (unsigned int j=0; j!=9; j++) //going through the 9 particles { for (unsigned int k=0; k!=3; k++) //x,y,z pos(j,k) += N(j,i) * coordinates[k]; } } } void ComputeGaussPointPositionsForPreReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 3, 3 > & pos,BoundedMatrix<double, 3, 3 > & N) //2D { N(0, 0) = 0.5; N(0, 1) = 0.25; N(0, 2) = 0.25; N(1, 0) = 0.25; N(1, 1) = 0.5; N(1, 2) = 0.25; N(2, 0) = 0.25; N(2, 1) = 0.25; N(2, 2) = 0.5; //first pos(0, 0) = 0.5 * geom[0].X() + 0.25 * geom[1].X() + 0.25 * geom[2].X(); pos(0, 1) = 0.5 * geom[0].Y() + 0.25 * geom[1].Y() + 0.25 * geom[2].Y(); pos(0, 2) = 0.5 * geom[0].Z() + 0.25 * geom[1].Z() + 0.25 * geom[2].Z(); //second pos(1, 0) = 0.25 * geom[0].X() + 0.5 * geom[1].X() + 0.25 * geom[2].X(); pos(1, 1) = 0.25 * geom[0].Y() + 0.5 * geom[1].Y() + 
0.25 * geom[2].Y(); pos(1, 2) = 0.25 * geom[0].Z() + 0.5 * geom[1].Z() + 0.25 * geom[2].Z(); //third pos(2, 0) = 0.25 * geom[0].X() + 0.25 * geom[1].X() + 0.5 * geom[2].X(); pos(2, 1) = 0.25 * geom[0].Y() + 0.25 * geom[1].Y() + 0.5 * geom[2].Y(); pos(2, 2) = 0.25 * geom[0].Z() + 0.25 * geom[1].Z() + 0.5 * geom[2].Z(); } void ComputeGaussPointPositionsForPreReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 4, 3 > & pos,BoundedMatrix<double, 4, 4 > & N) //3D { //creating 4 particles, each will be closer to a node and equidistant to the other nodes N(0, 0) = 0.4; N(0, 1) = 0.2; N(0, 2) = 0.2; N(0, 3) = 0.2; N(1, 0) = 0.2; N(1, 1) = 0.4; N(1, 2) = 0.2; N(1, 3) = 0.2; N(2, 0) = 0.2; N(2, 1) = 0.2; N(2, 2) = 0.4; N(2, 3) = 0.2; N(3, 0) = 0.2; N(3, 1) = 0.2; N(3, 2) = 0.2; N(3, 3) = 0.4; pos=ZeroMatrix(4,3); for (unsigned int i=0; i!=4; i++) //going through the 4 nodes { array_1d<double, 3 > & coordinates = geom[i].Coordinates(); for (unsigned int j=0; j!=4; j++) //going through the 4 particles { for (unsigned int k=0; k!=3; k++) //x,y,z pos(j,k) += N(j,i) * coordinates[k]; } } } void ComputeGaussPointPositions_45(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 45, 3 > & pos,BoundedMatrix<double, 45, 3 > & N) { //std::cout << "NEW ELEMENT" << std::endl; unsigned int counter=0; for (unsigned int i=0; i!=9;i++) { for (unsigned int j=0; j!=(9-i);j++) { N(counter,0)=0.05+double(i)*0.1; N(counter,1)=0.05+double(j)*0.1; N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ; pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X(); pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y(); pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z(); //std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl; counter++; } } } void ComputeGaussPointPositions_initial(Geometry< Node < 3 > >& geom, 
BoundedMatrix<double, 15, 3 > & pos,BoundedMatrix<double, 15, 3 > & N) //2D { //std::cout << "NEW ELEMENT" << std::endl; unsigned int counter=0; for (unsigned int i=0; i!=5;i++) { for (unsigned int j=0; j!=(5-i);j++) { N(counter,0)=0.05+double(i)*0.2; N(counter,1)=0.05+double(j)*0.2; N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ; pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X(); pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y(); pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z(); //std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl; counter++; } } } void ComputeGaussPointPositions_initial(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 20, 3 > & pos,BoundedMatrix<double, 20, 4 > & N) //3D { //std::cout << "NEW ELEMENT" << std::endl; //double total; double fraction_increment; unsigned int counter=0; for (unsigned int i=0; i!=4;i++) //going to build a particle "pyramid"(tetrahedra) by layers. the first layer will be made by a triangle of 4 base X 4 height. 
since it is a triangle, it means it will have 10 particles { //std::cout << "inside i" << i << std::endl; for (unsigned int j=0; j!=(4-i);j++) { //std::cout << "inside j" << j << std::endl; for (unsigned int k=0; k!=(4-i-j);k++) { //std::cout << "inside k" << k << std::endl; N(counter,0)= 0.27 * ( 0.175 + double(i) ) ; //this is our "surface" in which we will build each layer, so we must construct a triangle using what's left of the shape functions total (a total of 1) //total = 1.0 - N(counter,0); fraction_increment = 0.27; // N(counter,1)=fraction_increment * (0.175 + double(j)); N(counter,2)=fraction_increment * (0.175 + double(k)); N(counter,3)=1.0 - ( N(counter,0)+ N(counter,1) + N(counter,2) ) ; pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X() + N(counter,3) * geom[3].X(); pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y() + N(counter,3) * geom[3].Y(); pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z() + N(counter,3) * geom[3].Z(); //std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl; counter++; } } } } template<class T> bool InvertMatrix(const T& input, T& inverse) { typedef permutation_matrix<std::size_t> pmatrix; // create a working copy of the input T A(input); // create a permutation matrix for the LU-factorization pmatrix pm(A.size1()); // perform LU-factorization int res = lu_factorize(A, pm); if (res != 0) return false; // create identity matrix of "inverse" inverse.assign(identity_matrix<double> (A.size1())); // backsubstitute to get the inverse lu_substitute(A, pm, inverse); return true; } bool InvertMatrix3x3(const BoundedMatrix<double, TDim+1 , TDim+1 >& A, BoundedMatrix<double, TDim+1 , TDim+1 >& result) { double determinant = +A(0,0)*(A(1,1)*A(2,2)-A(2,1)*A(1,2)) -A(0,1)*(A(1,0)*A(2,2)-A(1,2)*A(2,0)) +A(0,2)*(A(1,0)*A(2,1)-A(1,1)*A(2,0)); double 
invdet = 1/determinant; result(0,0) = (A(1,1)*A(2,2)-A(2,1)*A(1,2))*invdet; result(1,0) = -(A(0,1)*A(2,2)-A(0,2)*A(2,1))*invdet; result(2,0) = (A(0,1)*A(1,2)-A(0,2)*A(1,1))*invdet; result(0,1) = -(A(1,0)*A(2,2)-A(1,2)*A(2,0))*invdet; result(1,1) = (A(0,0)*A(2,2)-A(0,2)*A(2,0))*invdet; result(2,1) = -(A(0,0)*A(1,2)-A(1,0)*A(0,2))*invdet; result(0,2) = (A(1,0)*A(2,1)-A(2,0)*A(1,1))*invdet; result(1,2) = -(A(0,0)*A(2,1)-A(2,0)*A(0,1))*invdet; result(2,2) = (A(0,0)*A(1,1)-A(1,0)*A(0,1))*invdet; return true; } virtual int Check() { KRATOS_TRY ProcessInfo& rCurrentProcessInfo = mr_model_part.GetProcessInfo(); if (rCurrentProcessInfo.Has(CONVECTION_DIFFUSION_SETTINGS)==false) KRATOS_THROW_ERROR(std::logic_error, "no CONVECTION_DIFFUSION_SETTINGS in model_part", ""); //std::cout << "ConvDiff::Check(). If crashes, check CONVECTION_DIFFUSION_SETTINGS is defined" << std::endl; ConvectionDiffusionSettings::Pointer my_settings = rCurrentProcessInfo.GetValue(CONVECTION_DIFFUSION_SETTINGS); //UNKNOWN VARIABLE if(my_settings->IsDefinedUnknownVariable()==true) { if (mr_model_part.NodesBegin()->SolutionStepsDataHas(my_settings->GetUnknownVariable()) == false) KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Unknown Variable defined but not contained in the model part", ""); } else KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Unknown Variable not defined!", ""); //PROJECTION VARIABLE //used as intermediate variable, is the variable at time n+1 but only accounting for the convective term. 
if(my_settings->IsDefinedProjectionVariable()==true) { if (mr_model_part.NodesBegin()->SolutionStepsDataHas(my_settings->GetProjectionVariable()) == false) KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Projection Variable defined but not contained in the model part", ""); } else KRATOS_THROW_ERROR(std::logic_error, "No Projection variable assigned for ConvDiff!", ""); //CONVECTION VELOCITY VARIABLE //CURRENTLY WE ARE USING (VELOCITY -MESH_VELOCITY) TO CONVECT, so the ConvectionVariable must not be used: //if(my_settings->IsDefinedConvectionVariable()==true) //{ // if (BaseType::GetModelPart().NodesBegin()->SolutionStepsDataHas(my_settings->GetConvectionVariable()) == false) // KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Convection Variable defined but not contained in the model part", ""); //} //else // std::cout << "No Projection variable assigned for ConvDiff. Assuming Convection=0" << std::endl; if(my_settings->IsDefinedConvectionVariable()==true) KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: ConvectionVariable not used. 
Use VelocityVariable instead", ""); //VELOCITY VARIABLE if(my_settings->IsDefinedVelocityVariable()==true) { if (mr_model_part.NodesBegin()->SolutionStepsDataHas(my_settings->GetVelocityVariable()) == false) KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Velocity Variable defined but not contained in the model part", ""); } else KRATOS_THROW_ERROR(std::logic_error, "No Velocity variable assigned for ConvDiff!", ""); if (mr_model_part.NodesBegin()->SolutionStepsDataHas(MEAN_SIZE) == false) KRATOS_THROW_ERROR(std::logic_error, "Add MEAN_SIZE variable to model part!", ""); if (mr_model_part.NodesBegin()->SolutionStepsDataHas(DELTA_SCALAR1) == false) KRATOS_THROW_ERROR(std::logic_error, "Add DELTA_SCALAR1 variable to model part!", ""); return 0; KRATOS_CATCH("") } ModelPart& mr_model_part; int m_nparticles; int mnelems; int moffset; //vector<double> mareas_vector; UNUSED SO COMMENTED int max_nsubsteps; double max_substep_dt; int mmaximum_number_of_particles; std::vector< Convection_Particle > mparticles_vector; //Point<3> int mlast_elem_id; bool modd_timestep; bool mparticle_printing_tool_initialized; unsigned int mfilter_factor; unsigned int mlast_node_id; //ModelPart& mr_particle_model_part; vector<int> mnumber_of_particles_in_elems; vector<int> mnumber_of_particles_in_elems_aux; //vector<ParticlePointerVector*> mpointers_to_particle_pointers_vectors; //pointing to the GetValue of each element vector<ParticlePointerVector> mvector_of_particle_pointers_vectors; typename BinsObjectDynamic<Configure>::Pointer mpBinsObjectDynamic; const Variable<double>& mUnknownVar; const Variable<double>& mProjectionVar; const Variable<array_1d<double,3> >& mVelocityVar; const Variable<array_1d<double,3> >& mMeshVelocityVar; }; } // namespace Kratos. #endif // KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_TRANSPORT_INCLUDED defined
convolution_sgemm_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void im2col_sgemm_int8_sse(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt) { #if NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__ if (ncnn::cpu_support_x86_avx512_vnni()) { extern void im2col_sgemm_int8_sse_avx512vnni(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt); im2col_sgemm_int8_sse_avx512vnni(bottom_im2col, top_blob, kernel, opt); return; } #endif #if NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__ if (ncnn::cpu_support_x86_avx_vnni()) { extern void im2col_sgemm_int8_sse_avxvnni(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt); im2col_sgemm_int8_sse_avxvnni(bottom_im2col, top_blob, kernel, opt); return; } #endif #if NCNN_AVX2 && __AVX__ && !__AVX2__ if (ncnn::cpu_support_x86_avx2()) { extern void im2col_sgemm_int8_sse_avx2(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt); im2col_sgemm_int8_sse_avx2(bottom_im2col, top_blob, kernel, opt); return; } #endif #if NCNN_XOP && __SSE2__ && !__XOP__ if (ncnn::cpu_support_x86_xop()) { extern void im2col_sgemm_int8_sse_xop(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt); im2col_sgemm_int8_sse_xop(bottom_im2col, top_blob, kernel, opt); return; } #endif // Mat 
bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; // permute Mat tmp; #if __SSE2__ if (inch >= 4) { #if __AVX2__ if (size >= 4) tmp.create(4 * maxk, inch / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator); else tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator); #else if (size >= 2) tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator); else tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator); #endif } else { #if __AVX2__ if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator); else tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator); #else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator); else tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator); #endif } { #if __AVX2__ int remain_size_start = 0; int nn_size = size >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; signed char* tmpptr = tmp.channel(i / 4); int q = 0; for (; q + 3 < inch; q += 4) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = 
img0[1]; tmpptr[5] = img1[1]; tmpptr[6] = img2[1]; tmpptr[7] = img3[1]; tmpptr[8] = img0[2]; tmpptr[9] = img1[2]; tmpptr[10] = img2[2]; tmpptr[11] = img3[2]; tmpptr[12] = img0[3]; tmpptr[13] = img1[3]; tmpptr[14] = img2[3]; tmpptr[15] = img3[3]; tmpptr += 16; img0 += size; img1 += size; img2 += size; img3 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += size; } } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #else int remain_size_start = 0; int nn_size = (size - remain_size_start) >> 1; #endif #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; #if __AVX2__ signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #else signed char* tmpptr = tmp.channel(i / 2); #endif int q = 0; for (; q + 3 < inch; q += 4) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img0[1]; tmpptr[5] = img1[1]; tmpptr[6] = img2[1]; tmpptr[7] = img3[1]; tmpptr += 8; img0 += size; img1 += size; img2 += size; img3 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += size; } } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { #if __AVX2__ signed char* tmpptr 
= tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #else signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif int q = 0; for (; q + 3 < inch; q += 4) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr += 4; img0 += size; img1 += size; img2 += size; img3 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += size; } } } } #else // __SSE2__ tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator); { #pragma omp parallel for num_threads(opt.num_threads) for (int i = 0; i < size; i++) { signed char* tmpptr = tmp.channel(i); int q = 0; for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += size; } } } } #endif // __SSE2__ int nn_outch = 0; int remain_outch_start = 0; #if __SSE2__ nn_outch = outch >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; int* outptr0 = top_blob.channel(p); int* outptr1 = top_blob.channel(p + 1); int* outptr2 = top_blob.channel(p + 2); int* outptr3 = top_blob.channel(p + 3); int i = 0; #if __AVX2__ for (; i + 3 < size; i += 4) { const signed char* tmpptr = tmp.channel(i / 4); const signed char* kptr0 = kernel.channel(p / 4); int nn4 = (inch / 4) * maxk; int nn1 = (inch % 4) * maxk; __m256i _sum00_12 = _mm256_setzero_si256(); __m256i _sum20_32 = _mm256_setzero_si256(); if (nn4 > 0) { #if __AVXVNNI__ || __AVX512VNNI__ __m256i _sum10_02 = 
_mm256_setzero_si256(); __m256i _sum30_22 = _mm256_setzero_si256(); #else __m256i _sum10_02 = _mm256_setzero_si256(); __m256i _sum01_13 = _mm256_setzero_si256(); __m256i _sum11_03 = _mm256_setzero_si256(); __m256i _sum30_22 = _mm256_setzero_si256(); __m256i _sum21_33 = _mm256_setzero_si256(); __m256i _sum31_23 = _mm256_setzero_si256(); #endif int j = 0; for (; j < nn4; j++) { __m128i _val0123 = _mm_loadu_si128((const __m128i*)tmpptr); __m256i _val0123_16 = _mm256_cvtepi8_epi16(_val0123); __m256i _val01_16 = _mm256_permute4x64_epi64(_val0123_16, _MM_SHUFFLE(1, 1, 0, 0)); __m256i _val23_16 = _mm256_permute4x64_epi64(_val0123_16, _MM_SHUFFLE(3, 3, 2, 2)); __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0); __m256i _w01_16 = _mm256_cvtepi8_epi16(_w01); __m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78); __m256i _val32_16 = _mm256_permute4x64_epi64(_val23_16, 78); #if __AVXVNNI__ || __AVX512VNNI__ _sum00_12 = _mm256_dpwssd_epi32(_sum00_12, _val01_16, _w01_16); _sum10_02 = _mm256_dpwssd_epi32(_sum10_02, _val10_16, _w01_16); _sum20_32 = _mm256_dpwssd_epi32(_sum20_32, _val23_16, _w01_16); _sum30_22 = _mm256_dpwssd_epi32(_sum30_22, _val32_16, _w01_16); #else __m256i _sl00_11 = _mm256_mullo_epi16(_val01_16, _w01_16); __m256i _sh00_11 = _mm256_mulhi_epi16(_val01_16, _w01_16); __m256i _sl10_01 = _mm256_mullo_epi16(_val10_16, _w01_16); __m256i _sh10_01 = _mm256_mulhi_epi16(_val10_16, _w01_16); __m256i _sl20_31 = _mm256_mullo_epi16(_val23_16, _w01_16); __m256i _sh20_31 = _mm256_mulhi_epi16(_val23_16, _w01_16); __m256i _sl30_21 = _mm256_mullo_epi16(_val32_16, _w01_16); __m256i _sh30_21 = _mm256_mulhi_epi16(_val32_16, _w01_16); _sum00_12 = _mm256_add_epi32(_sum00_12, _mm256_unpacklo_epi16(_sl00_11, _sh00_11)); _sum10_02 = _mm256_add_epi32(_sum10_02, _mm256_unpacklo_epi16(_sl10_01, _sh10_01)); _sum01_13 = _mm256_add_epi32(_sum01_13, _mm256_unpackhi_epi16(_sl00_11, _sh00_11)); _sum11_03 = _mm256_add_epi32(_sum11_03, _mm256_unpackhi_epi16(_sl10_01, _sh10_01)); _sum20_32 = 
_mm256_add_epi32(_sum20_32, _mm256_unpacklo_epi16(_sl20_31, _sh20_31)); _sum30_22 = _mm256_add_epi32(_sum30_22, _mm256_unpacklo_epi16(_sl30_21, _sh30_21)); _sum21_33 = _mm256_add_epi32(_sum21_33, _mm256_unpackhi_epi16(_sl20_31, _sh20_31)); _sum31_23 = _mm256_add_epi32(_sum31_23, _mm256_unpackhi_epi16(_sl30_21, _sh30_21)); #endif tmpptr += 16; kptr0 += 16; } #if __AVXVNNI__ || __AVX512VNNI__ _sum00_12 = _mm256_hadd_epi32(_sum00_12, _sum10_02); _sum20_32 = _mm256_hadd_epi32(_sum20_32, _sum30_22); _sum00_12 = _mm256_permute4x64_epi64(_sum00_12, _MM_SHUFFLE(2, 1, 3, 0)); _sum20_32 = _mm256_permute4x64_epi64(_sum20_32, _MM_SHUFFLE(2, 1, 3, 0)); #else // transpose 4x8 { __m256i _tmp0, _tmp1, _tmp2, _tmp3; _tmp0 = _mm256_unpacklo_epi32(_sum00_12, _sum10_02); _tmp1 = _mm256_unpacklo_epi32(_sum01_13, _sum11_03); _tmp2 = _mm256_unpackhi_epi32(_sum00_12, _sum10_02); _tmp3 = _mm256_unpackhi_epi32(_sum01_13, _sum11_03); _sum00_12 = _mm256_unpacklo_epi64(_tmp0, _tmp1); _sum10_02 = _mm256_unpackhi_epi64(_tmp0, _tmp1); _sum01_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3); _sum11_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3); } { __m256i _tmp0, _tmp1, _tmp2, _tmp3; _tmp0 = _mm256_unpacklo_epi32(_sum20_32, _sum30_22); _tmp1 = _mm256_unpacklo_epi32(_sum21_33, _sum31_23); _tmp2 = _mm256_unpackhi_epi32(_sum20_32, _sum30_22); _tmp3 = _mm256_unpackhi_epi32(_sum21_33, _sum31_23); _sum20_32 = _mm256_unpacklo_epi64(_tmp0, _tmp1); _sum30_22 = _mm256_unpackhi_epi64(_tmp0, _tmp1); _sum21_33 = _mm256_unpacklo_epi64(_tmp2, _tmp3); _sum31_23 = _mm256_unpackhi_epi64(_tmp2, _tmp3); } _sum00_12 = _mm256_add_epi32(_sum00_12, _sum10_02); _sum01_13 = _mm256_add_epi32(_sum01_13, _sum11_03); _sum00_12 = _mm256_add_epi32(_sum00_12, _sum01_13); _sum20_32 = _mm256_add_epi32(_sum20_32, _sum30_22); _sum21_33 = _mm256_add_epi32(_sum21_33, _sum31_23); _sum20_32 = _mm256_add_epi32(_sum20_32, _sum21_33); __m256i _perm_mask = _mm256_set_epi32(6, 4, 3, 1, 7, 5, 2, 0); _sum00_12 = _mm256_permutevar8x32_epi32(_sum00_12, 
_perm_mask); _sum20_32 = _mm256_permutevar8x32_epi32(_sum20_32, _perm_mask); #endif } __m128i _sum00 = _mm256_extracti128_si256(_sum00_12, 0); __m128i _sum10 = _mm256_extracti128_si256(_sum00_12, 1); __m128i _sum20 = _mm256_extracti128_si256(_sum20_32, 0); __m128i _sum30 = _mm256_extracti128_si256(_sum20_32, 1); int j = 0; for (; j < nn1; j++) { __m128i _val0123 = _mm_loadl_epi64((const __m128i*)tmpptr); #if __SSE4_1__ _val0123 = _mm_cvtepi8_epi16(_val0123); #else __m128i _extval0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val0123); _val0123 = _mm_unpacklo_epi8(_val0123, _extval0123); #endif __m128i _val01 = _mm_shufflelo_epi16(_val0123, _MM_SHUFFLE(1, 1, 0, 0)); _val01 = _mm_shuffle_epi32(_val01, _MM_SHUFFLE(1, 1, 0, 0)); __m128i _val23 = _mm_shufflelo_epi16(_val0123, _MM_SHUFFLE(3, 3, 2, 2)); _val23 = _mm_shuffle_epi32(_val23, _MM_SHUFFLE(1, 1, 0, 0)); __m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0); #if __SSE4_1__ _w0123 = _mm_cvtepi8_epi16(_w0123); #else __m128i _extw0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123); _w0123 = _mm_unpacklo_epi8(_w0123, _extw0123); #endif _w0123 = _mm_shuffle_epi32(_w0123, _MM_SHUFFLE(1, 0, 1, 0)); __m128i _sl00 = _mm_mullo_epi16(_val01, _w0123); __m128i _sh00 = _mm_mulhi_epi16(_val01, _w0123); __m128i _sl10 = _mm_mullo_epi16(_val23, _w0123); __m128i _sh10 = _mm_mulhi_epi16(_val23, _w0123); _sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00)); _sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl00, _sh00)); _sum20 = _mm_add_epi32(_sum20, _mm_unpacklo_epi16(_sl10, _sh10)); _sum30 = _mm_add_epi32(_sum30, _mm_unpackhi_epi16(_sl10, _sh10)); tmpptr += 4; kptr0 += 4; } // transpose 4x4 { __m128i _tmp0, _tmp1, _tmp2, _tmp3; _tmp0 = _mm_unpacklo_epi32(_sum00, _sum10); _tmp1 = _mm_unpacklo_epi32(_sum20, _sum30); _tmp2 = _mm_unpackhi_epi32(_sum00, _sum10); _tmp3 = _mm_unpackhi_epi32(_sum20, _sum30); _sum00 = _mm_unpacklo_epi64(_tmp0, _tmp1); _sum10 = _mm_unpackhi_epi64(_tmp0, _tmp1); _sum20 = _mm_unpacklo_epi64(_tmp2, 
_tmp3); _sum30 = _mm_unpackhi_epi64(_tmp2, _tmp3); } _mm_storeu_si128((__m128i*)outptr0, _sum00); _mm_storeu_si128((__m128i*)outptr1, _sum10); _mm_storeu_si128((__m128i*)outptr2, _sum20); _mm_storeu_si128((__m128i*)outptr3, _sum30); outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; } #endif for (; i + 1 < size; i += 2) { #if __AVX2__ const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #else const signed char* tmpptr = tmp.channel(i / 2); #endif const signed char* kptr0 = kernel.channel(p / 4); int nn4 = (inch / 4) * maxk; int nn1 = (inch % 4) * maxk; #if __AVX2__ __m256i _sum00_12 = _mm256_setzero_si256(); #else __m128i _sum00 = _mm_setzero_si128(); __m128i _sum10 = _mm_setzero_si128(); #endif if (nn4 > 0) { #if __AVX2__ #if __AVXVNNI__ || __AVX512VNNI__ __m256i _sum10_02 = _mm256_setzero_si256(); #else __m256i _sum10_02 = _mm256_setzero_si256(); __m256i _sum01_13 = _mm256_setzero_si256(); __m256i _sum11_03 = _mm256_setzero_si256(); #endif #else #if __XOP__ __m128i _sum01 = _mm_setzero_si128(); __m128i _sum11 = _mm_setzero_si128(); #else __m128i _sum01 = _mm_setzero_si128(); __m128i _sum02 = _mm_setzero_si128(); __m128i _sum03 = _mm_setzero_si128(); __m128i _sum11 = _mm_setzero_si128(); __m128i _sum12 = _mm_setzero_si128(); __m128i _sum13 = _mm_setzero_si128(); #endif #endif int j = 0; for (; j < nn4; j++) { #if __AVX2__ __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr); __m256i _val01_16 = _mm256_cvtepi8_epi16(_val01); _val01_16 = _mm256_permute4x64_epi64(_val01_16, _MM_SHUFFLE(1, 1, 0, 0)); __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0); __m256i _w01_16 = _mm256_cvtepi8_epi16(_w01); __m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78); #if __AVXVNNI__ || __AVX512VNNI__ _sum00_12 = _mm256_dpwssd_epi32(_sum00_12, _val01_16, _w01_16); _sum10_02 = _mm256_dpwssd_epi32(_sum10_02, _val10_16, _w01_16); #else __m256i _sl00_11 = _mm256_mullo_epi16(_val01_16, _w01_16); __m256i _sh00_11 = _mm256_mulhi_epi16(_val01_16, _w01_16); __m256i 
_sl10_01 = _mm256_mullo_epi16(_val10_16, _w01_16); __m256i _sh10_01 = _mm256_mulhi_epi16(_val10_16, _w01_16); _sum00_12 = _mm256_add_epi32(_sum00_12, _mm256_unpacklo_epi16(_sl00_11, _sh00_11)); _sum10_02 = _mm256_add_epi32(_sum10_02, _mm256_unpacklo_epi16(_sl10_01, _sh10_01)); _sum01_13 = _mm256_add_epi32(_sum01_13, _mm256_unpackhi_epi16(_sl00_11, _sh00_11)); _sum11_03 = _mm256_add_epi32(_sum11_03, _mm256_unpackhi_epi16(_sl10_01, _sh10_01)); #endif #else __m128i _val01 = _mm_loadl_epi64((const __m128i*)tmpptr); #if __SSE4_1__ _val01 = _mm_cvtepi8_epi16(_val01); #else __m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01); _val01 = _mm_unpacklo_epi8(_val01, _extval01); #endif __m128i _val0 = _mm_shuffle_epi32(_val01, _MM_SHUFFLE(1, 0, 1, 0)); __m128i _val1 = _mm_shuffle_epi32(_val01, _MM_SHUFFLE(3, 2, 3, 2)); __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0); __m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01); __m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01); __m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01); #if __XOP__ _sum00 = _mm_maddd_epi16(_val0, _w0, _sum00); _sum01 = _mm_maddd_epi16(_val0, _w1, _sum01); _sum10 = _mm_maddd_epi16(_val1, _w0, _sum10); _sum11 = _mm_maddd_epi16(_val1, _w1, _sum11); #else __m128i _sl00 = _mm_mullo_epi16(_val0, _w0); __m128i _sh00 = _mm_mulhi_epi16(_val0, _w0); __m128i _sl01 = _mm_mullo_epi16(_val0, _w1); __m128i _sh01 = _mm_mulhi_epi16(_val0, _w1); __m128i _sl10 = _mm_mullo_epi16(_val1, _w0); __m128i _sh10 = _mm_mulhi_epi16(_val1, _w0); __m128i _sl11 = _mm_mullo_epi16(_val1, _w1); __m128i _sh11 = _mm_mulhi_epi16(_val1, _w1); _sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00)); _sum01 = _mm_add_epi32(_sum01, _mm_unpackhi_epi16(_sl00, _sh00)); _sum02 = _mm_add_epi32(_sum02, _mm_unpacklo_epi16(_sl01, _sh01)); _sum03 = _mm_add_epi32(_sum03, _mm_unpackhi_epi16(_sl01, _sh01)); _sum10 = _mm_add_epi32(_sum10, _mm_unpacklo_epi16(_sl10, _sh10)); _sum11 = _mm_add_epi32(_sum11, _mm_unpackhi_epi16(_sl10, _sh10)); 
_sum12 = _mm_add_epi32(_sum12, _mm_unpacklo_epi16(_sl11, _sh11)); _sum13 = _mm_add_epi32(_sum13, _mm_unpackhi_epi16(_sl11, _sh11)); #endif #endif tmpptr += 8; kptr0 += 16; } #if __AVX2__ #if __AVXVNNI__ || __AVX512VNNI__ _sum00_12 = _mm256_hadd_epi32(_sum00_12, _sum10_02); _sum00_12 = _mm256_permute4x64_epi64(_sum00_12, _MM_SHUFFLE(2, 1, 3, 0)); #else // transpose 4x8 { __m256i _tmp0, _tmp1, _tmp2, _tmp3; _tmp0 = _mm256_unpacklo_epi32(_sum00_12, _sum10_02); _tmp1 = _mm256_unpacklo_epi32(_sum01_13, _sum11_03); _tmp2 = _mm256_unpackhi_epi32(_sum00_12, _sum10_02); _tmp3 = _mm256_unpackhi_epi32(_sum01_13, _sum11_03); _sum00_12 = _mm256_unpacklo_epi64(_tmp0, _tmp1); _sum10_02 = _mm256_unpackhi_epi64(_tmp0, _tmp1); _sum01_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3); _sum11_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3); } _sum00_12 = _mm256_add_epi32(_sum00_12, _sum10_02); _sum01_13 = _mm256_add_epi32(_sum01_13, _sum11_03); _sum00_12 = _mm256_add_epi32(_sum00_12, _sum01_13); __m256i _perm_mask = _mm256_set_epi32(6, 4, 3, 1, 7, 5, 2, 0); _sum00_12 = _mm256_permutevar8x32_epi32(_sum00_12, _perm_mask); #endif #else #if __XOP__ _sum00 = _mm_hadd_epi32(_sum00, _sum01); _sum10 = _mm_hadd_epi32(_sum10, _sum11); #else // transpose 4x4 { __m128i _tmp0, _tmp1, _tmp2, _tmp3; _tmp0 = _mm_unpacklo_epi32(_sum00, _sum01); _tmp1 = _mm_unpacklo_epi32(_sum02, _sum03); _tmp2 = _mm_unpackhi_epi32(_sum00, _sum01); _tmp3 = _mm_unpackhi_epi32(_sum02, _sum03); _sum00 = _mm_unpacklo_epi64(_tmp0, _tmp1); _sum01 = _mm_unpackhi_epi64(_tmp0, _tmp1); _sum02 = _mm_unpacklo_epi64(_tmp2, _tmp3); _sum03 = _mm_unpackhi_epi64(_tmp2, _tmp3); } { __m128i _tmp0, _tmp1, _tmp2, _tmp3; _tmp0 = _mm_unpacklo_epi32(_sum10, _sum11); _tmp1 = _mm_unpacklo_epi32(_sum12, _sum13); _tmp2 = _mm_unpackhi_epi32(_sum10, _sum11); _tmp3 = _mm_unpackhi_epi32(_sum12, _sum13); _sum10 = _mm_unpacklo_epi64(_tmp0, _tmp1); _sum11 = _mm_unpackhi_epi64(_tmp0, _tmp1); _sum12 = _mm_unpacklo_epi64(_tmp2, _tmp3); _sum13 = _mm_unpackhi_epi64(_tmp2, 
_tmp3); } _sum00 = _mm_add_epi32(_sum00, _sum01); _sum02 = _mm_add_epi32(_sum02, _sum03); _sum10 = _mm_add_epi32(_sum10, _sum11); _sum12 = _mm_add_epi32(_sum12, _sum13); _sum00 = _mm_add_epi32(_sum00, _sum02); _sum10 = _mm_add_epi32(_sum10, _sum12); #endif #endif } #if __AVX2__ __m128i _sum00 = _mm256_extracti128_si256(_sum00_12, 0); __m128i _sum10 = _mm256_extracti128_si256(_sum00_12, 1); #endif int j = 0; for (; j < nn1; j++) { __m128i _val = _mm_set_epi16(tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[0], tmpptr[0], tmpptr[0], tmpptr[0]); // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99754 // gcc incorrectly put 32bit to tail with _mm_loadu_si32 :( // 0 1 2 3 x x x x x x x x x x x x // x x x x x x x x x x x x 0 1 2 3 // __m128i _w0123 = _mm_loadu_si32(kptr0); __m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0); #if __SSE4_1__ _w0123 = _mm_cvtepi8_epi16(_w0123); #else __m128i _extw0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123); _w0123 = _mm_unpacklo_epi8(_w0123, _extw0123); #endif _w0123 = _mm_shuffle_epi32(_w0123, _MM_SHUFFLE(1, 0, 1, 0)); __m128i _sl00 = _mm_mullo_epi16(_val, _w0123); __m128i _sh00 = _mm_mulhi_epi16(_val, _w0123); _sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00)); _sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl00, _sh00)); tmpptr += 2; kptr0 += 4; } int sum[8]; _mm_storeu_si128((__m128i*)sum, _sum00); _mm_storeu_si128((__m128i*)(sum + 4), _sum10); outptr0[0] = sum[0]; outptr1[0] = sum[1]; outptr2[0] = sum[2]; outptr3[0] = sum[3]; outptr0[1] = sum[4]; outptr1[1] = sum[5]; outptr2[1] = sum[6]; outptr3[1] = sum[7]; outptr0 += 2; outptr1 += 2; outptr2 += 2; outptr3 += 2; } for (; i < size; i++) { #if __AVX2__ const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #else const signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif const signed char* kptr0 = kernel.channel(p / 4); int nn4 = (inch / 4) * maxk; int nn1 = (inch % 4) * maxk; __m128i _sum0 = _mm_setzero_si128(); if (nn4 > 0) { __m128i 
_sum1 = _mm_setzero_si128(); __m128i _sum2 = _mm_setzero_si128(); __m128i _sum3 = _mm_setzero_si128(); int j = 0; for (; j < nn4; j++) { __m128i _val01 = _mm_loadl_epi64((const __m128i*)tmpptr); #if __SSE4_1__ __m128i _val0 = _mm_cvtepi8_epi16(_val01); #else __m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01); __m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01); #endif _val0 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(1, 0, 1, 0)); __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0); __m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01); __m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01); __m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01); __m128i _sl00 = _mm_mullo_epi16(_val0, _w0); __m128i _sh00 = _mm_mulhi_epi16(_val0, _w0); __m128i _sl01 = _mm_mullo_epi16(_val0, _w1); __m128i _sh01 = _mm_mulhi_epi16(_val0, _w1); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl01, _sh01)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl01, _sh01)); tmpptr += 4; kptr0 += 16; } // transpose 4x4 { __m128i _tmp0, _tmp1, _tmp2, _tmp3; _tmp0 = _mm_unpacklo_epi32(_sum0, _sum1); _tmp1 = _mm_unpacklo_epi32(_sum2, _sum3); _tmp2 = _mm_unpackhi_epi32(_sum0, _sum1); _tmp3 = _mm_unpackhi_epi32(_sum2, _sum3); _sum0 = _mm_unpacklo_epi64(_tmp0, _tmp1); _sum1 = _mm_unpackhi_epi64(_tmp0, _tmp1); _sum2 = _mm_unpacklo_epi64(_tmp2, _tmp3); _sum3 = _mm_unpackhi_epi64(_tmp2, _tmp3); } _sum0 = _mm_add_epi32(_sum0, _sum1); _sum2 = _mm_add_epi32(_sum2, _sum3); _sum0 = _mm_add_epi32(_sum0, _sum2); } int j = 0; for (; j < nn1; j++) { __m128i _val = _mm_set1_epi16(tmpptr[0]); __m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0); #if __SSE4_1__ _w0123 = _mm_cvtepi8_epi16(_w0123); #else __m128i _extw0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123); _w0123 = _mm_unpacklo_epi8(_w0123, _extw0123); #endif __m128i _sl00 = _mm_mullo_epi16(_val, _w0123); __m128i _sh00 = 
_mm_mulhi_epi16(_val, _w0123); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00)); tmpptr += 1; kptr0 += 4; } int sum[4]; _mm_storeu_si128((__m128i*)sum, _sum0); outptr0[0] = sum[0]; outptr1[0] = sum[1]; outptr2[0] = sum[2]; outptr3[0] = sum[3]; outptr0 += 1; outptr1 += 1; outptr2 += 1; outptr3 += 1; } } remain_outch_start += nn_outch << 2; #endif // __SSE2__ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { int* outptr0 = top_blob.channel(p); int i = 0; #if __SSE2__ #if __AVX2__ for (; i + 3 < size; i += 4) { const signed char* tmpptr = tmp.channel(i / 4); const signed char* kptr0 = kernel.channel(p / 4 + p % 4); int nn4 = (inch / 4) * maxk; int nn1 = (inch % 4) * maxk; int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; if (nn4 > 0) { __m256i _sum0_2 = _mm256_setzero_si256(); __m256i _sum1_3 = _mm256_setzero_si256(); int j = 0; for (; j < nn4; j++) { __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr); __m256i _val01_16 = _mm256_cvtepi8_epi16(_val01); __m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0); __m128i _w = _mm_cvtepi8_epi16(_w0123); _w = _mm_unpacklo_epi64(_w, _w); __m256i _ww = _mm256_inserti128_si256(_mm256_castsi128_si256(_w), _w, 1); __m256i _sl0_1 = _mm256_mullo_epi16(_val01_16, _ww); __m256i _sh0_1 = _mm256_mulhi_epi16(_val01_16, _ww); _sum0_2 = _mm256_add_epi32(_sum0_2, _mm256_unpacklo_epi16(_sl0_1, _sh0_1)); _sum1_3 = _mm256_add_epi32(_sum1_3, _mm256_unpackhi_epi16(_sl0_1, _sh0_1)); tmpptr += 16; kptr0 += 4; } __m128i _sum0 = _mm256_extracti128_si256(_sum0_2, 0); __m128i _sum1 = _mm256_extracti128_si256(_sum1_3, 0); __m128i _sum2 = _mm256_extracti128_si256(_sum0_2, 1); __m128i _sum3 = _mm256_extracti128_si256(_sum1_3, 1); sum0 = _mm_reduce_add_epi32(_sum0); sum1 = _mm_reduce_add_epi32(_sum1); sum2 = _mm_reduce_add_epi32(_sum2); sum3 = _mm_reduce_add_epi32(_sum3); } int j = 0; for (; j < nn1; j++) { signed char val0 = tmpptr[0]; signed char val1 = tmpptr[1]; signed 
char val2 = tmpptr[2]; signed char val3 = tmpptr[3]; signed char w = kptr0[0]; sum0 += val0 * w; sum1 += val1 * w; sum2 += val2 * w; sum3 += val3 * w; tmpptr += 4; kptr0 += 1; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0 += 4; } #endif for (; i + 1 < size; i += 2) { #if __AVX2__ const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #else const signed char* tmpptr = tmp.channel(i / 2); #endif const signed char* kptr0 = kernel.channel(p / 4 + p % 4); int nn4 = (inch / 4) * maxk; int nn1 = (inch % 4) * maxk; int sum0 = 0; int sum1 = 0; if (nn4 > 0) { __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); int j = 0; for (; j < nn4; j++) { __m128i _val = _mm_loadl_epi64((const __m128i*)tmpptr); __m128i _extval = _mm_cmpgt_epi8(_mm_setzero_si128(), _val); __m128i _val01 = _mm_unpacklo_epi8(_val, _extval); __m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0); #if __SSE4_1__ __m128i _w = _mm_cvtepi8_epi16(_w0123); #else __m128i _extw = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123); __m128i _w = _mm_unpacklo_epi8(_w0123, _extw); #endif _w = _mm_shuffle_epi32(_w, _MM_SHUFFLE(1, 0, 1, 0)); __m128i _sl01 = _mm_mullo_epi16(_val01, _w); __m128i _sh01 = _mm_mulhi_epi16(_val01, _w); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl01, _sh01)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl01, _sh01)); tmpptr += 8; kptr0 += 4; } sum0 = _mm_reduce_add_epi32(_sum0); sum1 = _mm_reduce_add_epi32(_sum1); } int j = 0; for (; j < nn1; j++) { signed char val0 = tmpptr[0]; signed char val1 = tmpptr[1]; signed char w = kptr0[0]; sum0 += val0 * w; sum1 += val1 * w; tmpptr += 2; kptr0 += 1; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0 += 2; } for (; i < size; i++) { #if __AVX2__ const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #else const signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif const signed char* kptr0 = kernel.channel(p / 4 + p % 4); int nn4 = (inch / 4) * maxk; int nn1 = (inch 
% 4) * maxk; // completes "int nn1 = (inch" begun on the previous line: scalar-tail channel count
            int sum = 0;

            if (nn4 > 0)
            {
                __m128i _sum = _mm_setzero_si128();

                int j = 0;
                for (; j < nn4; j++)
                {
                    // load 4 signed int8 inputs and sign-extend to int16
                    __m128i _val0123 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
                    __m128i _val = _mm_cvtepi8_epi16(_val0123);
#else
                    // manual sign extension: compare-to-zero yields the sign mask
                    __m128i _extval = _mm_cmpgt_epi8(_mm_setzero_si128(), _val0123);
                    __m128i _val = _mm_unpacklo_epi8(_val0123, _extval);
#endif
                    // load 4 signed int8 weights and sign-extend to int16
                    __m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
                    __m128i _w = _mm_cvtepi8_epi16(_w0123);
#else
                    __m128i _extw = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
                    __m128i _w = _mm_unpacklo_epi8(_w0123, _extw);
#endif
                    // widening 16x16 -> 32-bit multiply-accumulate via lo/hi product halves
                    __m128i _sl = _mm_mullo_epi16(_val, _w);
                    __m128i _sh = _mm_mulhi_epi16(_val, _w);

                    _sum = _mm_add_epi32(_sum, _mm_unpacklo_epi16(_sl, _sh));

                    tmpptr += 4;
                    kptr0 += 4;
                }

                sum = _mm_reduce_add_epi32(_sum); // horizontal add of the 4 int32 lanes
            }

            // scalar tail for the remaining (inch % 4) * maxk terms
            int j = 0;
            for (; j < nn1; j++)
            {
                signed char val = tmpptr[0];
                signed char w = kptr0[0];

                sum += val * w;

                tmpptr += 1;
                kptr0 += 1;
            }

            outptr0[0] = sum;
            outptr0 += 1;
        }
#else // __SSE2__
        // pure scalar fallback: one output pixel at a time, full inch * maxk dot product
        for (; i < size; i++)
        {
            const signed char* tmpptr = tmp.channel(i);
            const signed char* kptr0 = kernel.channel(p);

            int nn1 = inch * maxk;

            int sum = 0;
            int j = 0;
            for (; j < nn1; j++)
            {
                signed char val = tmpptr[0];
                signed char w = kptr0[0];

                sum += val * w;

                tmpptr += 1;
                kptr0 += 1;
            }

            outptr0[0] = sum;
            outptr0 += 1;
        }
#endif // __SSE2__
    }
}

// Repack the int8 convolution weights from maxk-inch-outch layout into the
// interleaved layout consumed by the SGEMM kernel above.
// NOTE(review): dst layout is 4a-4b-maxk-inch/4a-outch/4b per the original
// comment below — outch is blocked by 4, inch is blocked by 4 within each
// outch block, with scalar tails for the remainders.
static void convolution_im2col_sgemm_transform_kernel_int8_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;
#if __SSE2__
    // interleave
    // src = maxk-inch-outch
    // dst = 4a-4b-maxk-inch/4a-outch/4b
    Mat kernel = _kernel.reshape(maxk, inch, outch);
    // channel sizing mirrors the read pattern of the compute kernel:
    // 16 bytes per (4-outch x 4-inch) group, 4 bytes per tail group
    if (outch >= 4)
    {
        if (inch >= 4)
            kernel_tm.create(16 * maxk, inch / 4 + inch % 4, outch / 4 + outch % 4, (size_t)1u);
        else
            kernel_tm.create(4 * maxk, inch, outch / 4 + outch % 4, (size_t)1u);
    }
    else
    {
        if (inch >= 4)
            kernel_tm.create(4 * maxk, inch / 4 + inch % 4, outch, (size_t)1u);
        else
            kernel_tm.create(1 * maxk, inch, outch, (size_t)1u);
    }

    int q = 0;
    for (; q + 3 < outch; q += 4)
    {
        signed char* g00 = kernel_tm.channel(q / 4);

        int p = 0;
        for (; p + 3 < inch; p += 4)
        {
            // 4 output channels x 4 input channels, innermost over inch
            for (int k = 0; k < maxk; k++)
            {
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);

                        g00[0] = k00[k];
                        g00++;
                    }
                }
            }
        }
        for (; p < inch; p++)
        {
            // inch tail: 4 output channels x 1 input channel
            for (int k = 0; k < maxk; k++)
            {
                for (int i = 0; i < 4; i++)
                {
                    const signed char* k00 = kernel.channel(q + i).row<const signed char>(p);

                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
    }
    // TODO unroll 2
    for (; q < outch; q++)
    {
        // outch tail: one output channel per kernel_tm channel
        signed char* g00 = kernel_tm.channel(q / 4 + q % 4);

        int p = 0;
        for (; p + 3 < inch; p += 4)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int j = 0; j < 4; j++)
                {
                    const signed char* k00 = kernel.channel(q).row<const signed char>(p + j);

                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
        for (; p < inch; p++)
        {
            for (int k = 0; k < maxk; k++)
            {
                const signed char* k00 = kernel.channel(q).row<const signed char>(p);

                g00[0] = k00[k];
                g00++;
            }
        }
    }
#else // __SSE2__
    kernel_tm = _kernel.reshape(maxk, inch, outch);
#endif // __SSE2__
}

// int8 convolution via im2col + SGEMM: unfold the input into a
// size x maxk x inch matrix, then call the packed SGEMM kernel.
static void convolution_im2col_sgemm_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;

    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator);
    {
        // bytes to skip from the end of one output row to the start of the next
        const int gap = w * stride_h - outw * stride_w;

#pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            signed char* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const signed char* sptr = img.row<const signed char>(dilation_h * u) + dilation_w * v;

                    for (int i = 0; i < outh; i++)
                    {
                        // strided copy, unrolled by 4 / 2 / 1
                        int j = 0;
                        for (; j + 3 < outw; j += 4)
                        {
                            ptr[0] = sptr[0];
                            ptr[1] = sptr[stride_w];
                            ptr[2] = sptr[stride_w * 2];
                            ptr[3] = sptr[stride_w * 3];

                            sptr += stride_w * 4;
                            ptr += 4;
                        }
                        for (; j + 1 < outw; j += 2)
                        {
                            ptr[0] = sptr[0];
                            ptr[1] = sptr[stride_w];

                            sptr += stride_w * 2;
                            ptr += 2;
                        }
                        for (; j < outw; j++)
                        {
                            ptr[0] = sptr[0];

                            sptr += stride_w;
                            ptr += 1;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_int8_sse(bottom_im2col, top_blob, kernel, opt);
}
cpd_omp.c
/*
    This file is part of ParTI!.

    ParTI! is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as
    published by the Free Software Foundation, either version 3 of
    the License, or (at your option) any later version.

    ParTI! is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with ParTI!.
    If not, see <http://www.gnu.org/licenses/>.
*/

#include <HiParTI.h>
#include <assert.h>
#include <math.h>
#ifdef HIPARTI_USE_MAGMA
#include "magma_v2.h"
#include "magma_lapack.h"
#else
#include "clapack.h"
#endif
#include "sptensor.h"

/**
 * One full CP-ALS solve over a sparse tensor using OpenMP-parallel MTTKRP.
 *
 * @param ptien       input sparse tensor
 * @param rank        CP rank (number of columns of each factor)
 * @param niters      maximum number of ALS iterations
 * @param tol         convergence tolerance on the change of fit
 * @param tk          number of OpenMP threads
 * @param use_reduce  nonzero: use the privatized-buffer MTTKRP (ptiOmpMTTKRP_Reduce)
 * @param mats        nmodes+1 row-major factor matrices; mats[nmodes] is scratch for MTTKRP output
 * @param copy_mats   per-thread scratch buffers; only dereferenced when use_reduce == 1 (may be NULL otherwise)
 * @param lambda      output column norms of the factors
 * @return final fit value in [0, 1]
 */
double OmpCpdAlsStep(
    ptiSparseTensor const * const ptien,
    ptiIndex const rank,
    ptiIndex const niters,
    double const tol,
    const int tk,
    const int use_reduce,
    ptiMatrix ** mats,  // Row-major
    ptiMatrix ** copy_mats,
    ptiValue * const lambda)
{
    ptiIndex const nmodes = ptien->nmodes;
    ptiIndex const stride = mats[0]->stride;
    double fit = 0;
#ifdef HIPARTI_USE_OPENMP
    omp_set_num_threads(tk);
#endif

    for(ptiIndex m=0; m < nmodes; ++m) {
        ptiAssert(ptien->ndims[m] == mats[m]->nrows);
        ptiAssert(mats[m]->ncols == rank);
        // assert(mats[m]->stride == rank); // for correct column-major magma functions
    }

    ptiValue alpha = 1.0, beta = 0.0;
    char notrans = 'N';
    char uplo = 'L';
    int blas_rank = (int) rank;
    int blas_stride = (int) stride;

    ptiMatrix * tmp_mat = mats[nmodes];
    /* Gram matrices of the factors; symmetric, but stored in column-major. */
    ptiMatrix ** ata = (ptiMatrix **)malloc((nmodes+1) * sizeof(*ata));
    for(ptiIndex m=0; m < nmodes+1; ++m) {
        ata[m] = (ptiMatrix *)malloc(sizeof(ptiMatrix));
        ptiAssert(ptiNewMatrix(ata[m], rank, rank) == 0);
        ptiAssert(mats[m]->stride == ata[m]->stride);
    }

    /* Compute all "ata"s */
    for(ptiIndex m=0; m < nmodes; ++m) {
        /* ata[m] = mats[m]^T * mats[m]), actually do A * A' due to row-major mats, and output an upper triangular matrix. */
        int blas_nrows = (int)(mats[m]->nrows);
        ssyrk_(&uplo, &notrans, &blas_rank, &blas_nrows, &alpha,
            mats[m]->values, &blas_stride, &beta, ata[m]->values, &blas_stride);
    }

    double oldfit = 0;
    ptiIndex * mats_order = (ptiIndex*)malloc(nmodes * sizeof(*mats_order));

    for(ptiIndex it=0; it < niters; ++it) {
        ptiTimer timer;
        ptiNewTimer(&timer, 0);
        ptiStartTimer(timer);

        for(ptiIndex m=0; m < nmodes; ++m) {
            tmp_mat->nrows = mats[m]->nrows;

            /* Factor Matrices order: mode m first, then the rest cyclically. */
            mats_order[0] = m;
            for(ptiIndex i=1; i<nmodes; ++i)
                mats_order[i] = (m+i) % nmodes;

            /* MTTKRP writes its result into mats[nmodes] (row-major). */
            if(use_reduce == 1) {
                ptiAssert (ptiOmpMTTKRP_Reduce(ptien, mats, copy_mats, mats_order, m, tk) == 0);
            } else {
                ptiAssert (ptiOmpMTTKRP(ptien, mats, mats_order, m, tk) == 0);
            }

#ifdef HIPARTI_USE_OPENMP
            #pragma omp parallel for num_threads(tk)
#endif
            for(ptiIndex i=0; i<mats[m]->nrows * stride; ++i)
                mats[m]->values[i] = tmp_mat->values[i];

            /* Solve ? * ata[nmodes] = mats[nmodes] (tmp_mat) */
            ptiAssert ( ptiMatrixSolveNormals(m, nmodes, ata, mats[m]) == 0 );

            /* Normalized mats[m], store the norms in lambda. Use different
               norms to avoid precision explosion. */
            if (it == 0 ) {
                ptiMatrix2Norm(mats[m], lambda);
            } else {
                ptiMatrixMaxNorm(mats[m], lambda);
            }

            /* ata[m] = mats[m]^T * mats[m]) */
            int blas_nrows = (int)(mats[m]->nrows);
            ssyrk_(&uplo, &notrans, &blas_rank, &blas_nrows, &alpha,
                mats[m]->values, &blas_stride, &beta, ata[m]->values, &blas_stride);
        } // Loop nmodes

        fit = KruskalTensorFit(ptien, lambda, mats, ata);

        ptiStopTimer(timer);
        double its_time = ptiElapsedTime(timer);
        ptiFreeTimer(timer);

        printf(" its = %3"HIPARTI_PRI_INDEX " ( %.3lf s ) fit = %0.5f delta = %+0.4e\n",
            it+1, its_time, fit, fit - oldfit);
        if(it > 0 && fabs(fit - oldfit) < tol) {
            break;
        }
        oldfit = fit;
    } // Loop niters

    GetFinalLambda(rank, nmodes, mats, lambda);

    for(ptiIndex m=0; m < nmodes+1; ++m) {
        ptiFreeMatrix(ata[m]);
        free(ata[m]); /* the struct itself was malloc'd above; was leaked before */
    }
    free(ata);
    free(mats_order);

    return fit;
}

/**
 * OpenMP CP-ALS driver: allocates and randomizes the factor matrices,
 * runs OmpCpdAlsStep, and stores the factors and fit into ktensor.
 *
 * The factor matrix array is handed over to ktensor->factors and is NOT
 * freed here (only the scratch matrix mats[nmodes] internals are released).
 *
 * @return 0 on success
 */
int ptiOmpCpdAls(
    ptiSparseTensor const * const ptien,
    ptiIndex const rank,
    ptiIndex const niters,
    double const tol,
    const int tk,
    const int use_reduce,
    ptiKruskalTensor * ktensor)
{
    ptiIndex nmodes = ptien->nmodes;
#ifdef HIPARTI_USE_MAGMA
    magma_init();
#endif

    /* Initialize factor matrices */
    ptiIndex max_dim = ptiMaxIndexArray(ptien->ndims, nmodes);
    ptiMatrix ** mats = (ptiMatrix **)malloc((nmodes+1) * sizeof(*mats));
    for(ptiIndex m=0; m < nmodes+1; ++m) {
        mats[m] = (ptiMatrix *)malloc(sizeof(ptiMatrix));
    }
    for(ptiIndex m=0; m < nmodes; ++m) {
        ptiAssert(ptiNewMatrix(mats[m], ptien->ndims[m], rank) == 0);
        ptiAssert(ptiRandomizeMatrix(mats[m]) == 0);
    }
    ptiAssert(ptiNewMatrix(mats[nmodes], max_dim, rank) == 0);
    ptiAssert(ptiConstantMatrix(mats[nmodes], 0) == 0);

    /* BUG FIX: initialize to NULL so the value passed to OmpCpdAlsStep is
       well-defined when use_reduce != 1 (it was read uninitialized before). */
    ptiMatrix ** copy_mats = NULL;
    if(use_reduce == 1) {
        copy_mats = (ptiMatrix **)malloc(tk * sizeof(*copy_mats));
        for(int t=0; t<tk; ++t) {
            copy_mats[t] = (ptiMatrix *)malloc(sizeof(ptiMatrix));
            ptiAssert(ptiNewMatrix(copy_mats[t], max_dim, rank) == 0);
            ptiAssert(ptiConstantMatrix(copy_mats[t], 0) == 0);
        }
    }

    ptiTimer timer;
    ptiNewTimer(&timer, 0);
    ptiStartTimer(timer);

    ktensor->fit = OmpCpdAlsStep(ptien, rank, niters, tol, tk, use_reduce,
        mats, copy_mats, ktensor->lambda);

    ptiStopTimer(timer);
    ptiPrintElapsedTime(timer, "CPU SpTns CPD-ALS");
    ptiFreeTimer(timer);

    ktensor->factors = mats;

#ifdef HIPARTI_USE_MAGMA
    magma_finalize();
#endif
    ptiFreeMatrix(mats[nmodes]);
    if(use_reduce == 1) {
        for(int t=0; t<tk; ++t) {
            ptiFreeMatrix(copy_mats[t]);
            free(copy_mats[t]);
        }
        free(copy_mats);
    }

    return 0;
}
Example_target.2.c
/* * @@name: target.2c * @@type: C * @@compilable: yes * @@linkable: no * @@expect: success * @@version: omp_4.0 */ extern void init(float*, float*, int); extern void output(float*, int); void vec_mult(int N) { int i; float p[N], v1[N], v2[N]; init(v1, v2, N); #pragma omp target map(v1, v2, p) #pragma omp parallel for for (i=0; i<N; i++) p[i] = v1[i] * v2[i]; output(p, N); }
CorotateFEMConstraint.h
#ifndef __COROTATE_FEM_CONSTRAINT_H__ #define __COROTATE_FEM_CONSTRAINT_H__ #include "Constraint.h" #include "Tensor.h" #include <Eigen/Core> #include <Eigen/Sparse> #include <Eigen/Geometry> #include <Eigen/SVD> #include <Eigen/Eigenvalues> #include <iostream> namespace FEM { template <typename TinyScalar, typename TinyConstants> class CorotateFEMConstraint : public Constraint<TinyScalar, TinyConstants> { public: CorotateFEMConstraint( const TinyScalar& stiffness, const TinyScalar& poisson_ratio, int i0,int i1,int i2,int i3, TinyScalar volume,const Eigen::Matrix<TinyScalar, 3, 3>& invDm); int GetI0() {return mi0;} int GetI1() {return mi1;} int GetI2() {return mi2;} int GetI3() {return mi3;} int GetDof() override; ConstraintType GetType() override; // private: void ComputeF(const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& x); void ComputeP(Eigen::Matrix<TinyScalar, 3, 3>& P); void ComputedPdF(Tensor3333& dPdF); void ComputeSVD(const Eigen::Matrix<TinyScalar, 3, 3>& F); void EvaluateJMatrix(int index, std::vector<Eigen::Triplet<TinyScalar>>& J_triplets); void EvaluateLMatrix(std::vector<Eigen::Triplet<TinyScalar>>& L_triplets); void EvaluateDVector(const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& x); void GetDVector(int& index,Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& d); void fixIndex(int offset); // protected: int mi0,mi1,mi2,mi3; TinyScalar mVol; TinyScalar mMu,mLambda; TinyScalar mPoissonRatio; Eigen::Matrix<TinyScalar, 3, 3> mInvDm; Eigen::Matrix<TinyScalar, 3, 3> mDs; // [x1-x0; x2-x0; x3-x0] Eigen::Matrix<TinyScalar, 3, 3> mF; Eigen::Matrix<TinyScalar, 3, 3> mR,mU,mV,mD; // R=UV; D=svd.singularValues(); Eigen::Matrix<TinyScalar, 3, 3> md; Eigen::Matrix<TinyScalar, 3, 3> md_volume; }; #define EPS 1E-4 template <typename TinyScalar, typename TinyConstants> CorotateFEMConstraint<TinyScalar, TinyConstants>:: CorotateFEMConstraint(const TinyScalar& stiffness,const TinyScalar& poisson_ratio, int i0,int i1,int i2,int i3,TinyScalar vol,const 
Eigen::Matrix<TinyScalar, 3, 3>& invDm) :Constraint<TinyScalar, TinyConstants>(stiffness), mPoissonRatio(poisson_ratio), mi0(i0),mi1(i1),mi2(i2),mi3(i3), mMu(stiffness/((1.0+poisson_ratio))), mLambda(stiffness*poisson_ratio/((1.0+poisson_ratio)*(1-2.0*poisson_ratio))), mVol(vol),mInvDm(invDm),mDs(Eigen::Matrix<TinyScalar, 3, 3>::Zero()) { mF.setZero(); mR.setZero(); mU.setZero(); mV.setZero(); } template <typename TinyScalar, typename TinyConstants> void CorotateFEMConstraint<TinyScalar, TinyConstants>::fixIndex(int offset) { mi0 += offset; mi1 += offset; mi2 += offset; mi3 += offset; } template <typename TinyScalar, typename TinyConstants> void CorotateFEMConstraint<TinyScalar, TinyConstants>:: ComputeF (const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& x) { Eigen::Matrix<TinyScalar, 3, 1> x0(x.template block<3,1>(mi0*3,0)); Eigen::Matrix<TinyScalar, 3, 3> Ds; Ds.template block<3,1>(0,0) = x.template block<3,1>(mi1*3,0)-x0; Ds.template block<3,1>(0,1) = x.template block<3,1>(mi2*3,0)-x0; Ds.template block<3,1>(0,2) = x.template block<3,1>(mi3*3,0)-x0; mDs = Ds; mF = mDs * mInvDm; ComputeSVD(mF); } template <typename TinyScalar, typename TinyConstants> void CorotateFEMConstraint<TinyScalar, TinyConstants>:: ComputeP (Eigen::Matrix<TinyScalar, 3, 3>& P) { P = mMu*(mF - mR) + mLambda*((mR.transpose()*mF-Eigen::Matrix<TinyScalar, 3, 3>::Identity()).trace())*mR; } template <typename TinyScalar, typename TinyConstants> void CorotateFEMConstraint<TinyScalar, TinyConstants>:: ComputedPdF (Tensor3333& dPdF) { Tensor3333 dFdF, dRdF; dFdF.SetIdentity(); for(int i =0;i<3;i++) { for(int j=0;j<3;j++) { Eigen::Matrix<TinyScalar, 3, 3> M = mU.transpose()*dFdF(i,j)*mV; if(fabs(mD(0,0)-mD(1,1)) < EPS && fabs(mD(0,0)-mD(2,2)) < EPS) { Eigen::Matrix<TinyScalar, 3, 3> off_diag_M; off_diag_M.setZero(); for(int a=0; a<3; a++) { for(int b=0; b<3; b++) { if(a==b) continue; else off_diag_M(a,b) = M(a,b) / mD(0,0); } } dRdF(i,j) = mU*off_diag_M*mV.transpose(); } else { 
Eigen::Matrix<TinyScalar, 2, 1> unknown_side, known_side; Eigen::Matrix2d known_matrix; Eigen::Matrix<TinyScalar, 3, 3> U_tilde, V_tilde; U_tilde.setZero(); V_tilde.setZero(); Eigen::Matrix2d reg; reg.setZero(); reg(0,0) = reg(1,1) = EPS; for (unsigned int row = 0; row < 3; row++) { for (unsigned int col = 0; col < row; col++) { known_side = Eigen::Matrix<TinyScalar, 2, 1>(M(col, row), M(row, col)); known_matrix.block<2, 1>(0, 0) = Eigen::Matrix<TinyScalar, 2, 1>(-mD(row,row), mD(col,col)); known_matrix.block<2, 1>(0, 1) = Eigen::Matrix<TinyScalar, 2, 1>(-mD(col,col), mD(row,row)); if (fabs(mD(row,row) - mD(col,col) < EPS)) known_matrix += reg; else assert(fabs(known_matrix.determinant()) > 1E-6); unknown_side = known_matrix.inverse() * known_side; U_tilde(row, col) = unknown_side[0]; U_tilde(col, row) = -U_tilde(row, col); V_tilde(row, col) = unknown_side[1]; V_tilde(col, row) = -V_tilde(row, col); } } Eigen::Matrix<TinyScalar, 3, 3> deltaU = mU*U_tilde; Eigen::Matrix<TinyScalar, 3, 3> deltaV = V_tilde*mV.transpose(); dRdF(i, j) = deltaU*mV.transpose() + mU*deltaV; } } } Tensor3333 lambda_term; for(int i=0; i<3; i++) { for(int j=0; j<3; j++) { lambda_term(i,j) = (dRdF(i,j).transpose()*mF+mR.transpose()*dFdF(i,j)).trace()*mR + (mR.transpose()*mF-Eigen::Matrix<TinyScalar, 3, 3>::Identity()).trace()*dRdF(i,j); } } dPdF = (dFdF-dRdF)*mMu + mLambda*lambda_term; } template <typename TinyScalar, typename TinyConstants> void CorotateFEMConstraint<TinyScalar, TinyConstants>:: ComputeSVD(const Eigen::Matrix<TinyScalar, 3, 3>& F) { // #pragma omp critical // { Eigen::JacobiSVD<Eigen::Matrix<TinyScalar, 3, 3>> svd(F, Eigen::ComputeFullU | Eigen::ComputeFullV); Eigen::Matrix<TinyScalar, 3, 1> D = svd.singularValues(); mD.setZero(); mD(0,0) = D[0]; mD(1,1) = D[1]; mD(2,2) = D[2]; mU = svd.matrixU(); mV = svd.matrixV(); mR = mU*mV.transpose(); mF = F; // } } template <typename TinyScalar, typename TinyConstants> int CorotateFEMConstraint<TinyScalar, TinyConstants>:: GetDof() { 
/* Tail of a method whose beginning is above this view; it returns the
   constant 6 — presumably this element's number of projection rows/DoF
   blocks (two 3-column blocks) — TODO confirm against the declaration. */
return 6;
}

/* Identifies this constraint as a co-rotational FEM element. */
template <typename TinyScalar, typename TinyConstants>
ConstraintType CorotateFEMConstraint<TinyScalar, TinyConstants>::
GetType()
{
  return ConstraintType::COROTATE;
}

/* Appends this element's contribution to the global J matrix as triplets.
   `index` is the first free block-column of the projection vector; note it is
   passed BY VALUE, so the local `index += 3` advances do not reach the caller
   (contrast GetDVector below, which takes `int&`).
   Ai (9x12) maps the 12 stacked vertex coordinates (p0..p3) to the 9 entries
   of the deformation gradient built from mInvDm (the inverse rest-shape
   matrix); two blocks are emitted: one weighted mMu*mVol, and a second one
   additionally scaled by mPoissonRatio at the next block-column. */
template <typename TinyScalar, typename TinyConstants>
void CorotateFEMConstraint<TinyScalar, TinyConstants>::
EvaluateJMatrix(int index, std::vector<Eigen::Triplet<TinyScalar>>& J_triplets)
{
  Eigen::Matrix<TinyScalar, Eigen::Dynamic, Eigen::Dynamic> Ai(3*3,3*4);
  TinyScalar d11 = mInvDm(0,0);
  TinyScalar d12 = mInvDm(0,1);
  TinyScalar d13 = mInvDm(0,2);
  TinyScalar d21 = mInvDm(1,0);
  TinyScalar d22 = mInvDm(1,1);
  TinyScalar d23 = mInvDm(1,2);
  TinyScalar d31 = mInvDm(2,0);
  TinyScalar d32 = mInvDm(2,1);
  TinyScalar d33 = mInvDm(2,2);

  /* Row r of Ai gives one entry of F; the first vertex's coefficient is the
     negated column sum so that F depends on edge vectors (pi - p0). */
  Ai<<
    -d11-d21-d31,0,0,d11,0,0,d21,0,0,d31,0,0,
    0,-d11-d21-d31,0,0,d11,0,0,d21,0,0,d31,0,
    0,0,-d11-d21-d31,0,0,d11,0,0,d21,0,0,d31,
    -d12-d22-d32,0,0,d12,0,0,d22,0,0,d32,0,0,
    0,-d12-d22-d32,0,0,d12,0,0,d22,0,0,d32,0,
    0,0,-d12-d22-d32,0,0,d12,0,0,d22,0,0,d32,
    -d13-d23-d33,0,0,d13,0,0,d23,0,0,d33,0,0,
    0,-d13-d23-d33,0,0,d13,0,0,d23,0,0,d33,0,
    0,0,-d13-d23-d33,0,0,d13,0,0,d23,0,0,d33;

  /* 12x9 weighted transpose; row-block i corresponds to vertex idx[i]. */
  Eigen::Matrix<TinyScalar, Eigen::Dynamic, Eigen::Dynamic> MuAiT = mMu*mVol*Ai.transpose();
  int idx[4] = {mi0,mi1,mi2,mi3};

  for(int i =0;i<4;i++)
  {
    for(int j=0;j<3;j++)
    {
      //MuAiT.block [i,j] -- 3x3 matrix
      J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*(index+j)+0, MuAiT(3*i+0, 3*j+0)));
      J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*(index+j)+1, MuAiT(3*i+0, 3*j+1)));
      J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*(index+j)+2, MuAiT(3*i+0, 3*j+2)));
      J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*(index+j)+0, MuAiT(3*i+1, 3*j+0)));
      J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*(index+j)+1, MuAiT(3*i+1, 3*j+1)));
      J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*(index+j)+2, MuAiT(3*i+1, 3*j+2)));
      J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*(index+j)+0, MuAiT(3*i+2, 3*j+0)));
      J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*(index+j)+1, MuAiT(3*i+2, 3*j+1)));
      J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*(index+j)+2, MuAiT(3*i+2, 3*j+2)));
    }
  }
  index+=3;  /* local only — see note in the header comment */

  /* Second block: same structure, scaled by the Poisson-ratio weight and
     emitted at the next block-column (matches md_volume in GetDVector). */
  MuAiT = (MuAiT*mPoissonRatio).eval();
  for(int i =0;i<4;i++)
  {
    for(int j=0;j<3;j++)
    {
      //MuAiT.block [i,j] -- 3x3 matrix
      J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*(index+j)+0, MuAiT(3*i+0, 3*j+0)));
      J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*(index+j)+1, MuAiT(3*i+0, 3*j+1)));
      J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*(index+j)+2, MuAiT(3*i+0, 3*j+2)));
      J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*(index+j)+0, MuAiT(3*i+1, 3*j+0)));
      J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*(index+j)+1, MuAiT(3*i+1, 3*j+1)));
      J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*(index+j)+2, MuAiT(3*i+1, 3*j+2)));
      J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*(index+j)+0, MuAiT(3*i+2, 3*j+0)));
      J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*(index+j)+1, MuAiT(3*i+2, 3*j+1)));
      J_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*(index+j)+2, MuAiT(3*i+2, 3*j+2)));
    }
  }
  index+=3;
}

/* Appends this element's contribution to the global L (system/Laplacian)
   matrix: mMu*mVol*Ai^T*Ai scattered onto the 4 vertex indices, plus a second
   pass scaled by mPoissonRatio (same sparsity, so duplicates accumulate when
   the caller assembles the triplets). */
template <typename TinyScalar, typename TinyConstants>
void CorotateFEMConstraint<TinyScalar, TinyConstants>::
EvaluateLMatrix(std::vector<Eigen::Triplet<TinyScalar>>& L_triplets)
{
  Eigen::Matrix<TinyScalar, Eigen::Dynamic, Eigen::Dynamic> Ai(3*3,3*4);

  TinyScalar d11 = mInvDm(0,0);
  TinyScalar d12 = mInvDm(0,1);
  TinyScalar d13 = mInvDm(0,2);
  TinyScalar d21 = mInvDm(1,0);
  TinyScalar d22 = mInvDm(1,1);
  TinyScalar d23 = mInvDm(1,2);
  TinyScalar d31 = mInvDm(2,0);
  TinyScalar d32 = mInvDm(2,1);
  TinyScalar d33 = mInvDm(2,2);

  /* Same 9x12 gradient operator as in EvaluateJMatrix. */
  Ai<<
    -d11-d21-d31,0,0,d11,0,0,d21,0,0,d31,0,0,
    0,-d11-d21-d31,0,0,d11,0,0,d21,0,0,d31,0,
    0,0,-d11-d21-d31,0,0,d11,0,0,d21,0,0,d31,
    -d12-d22-d32,0,0,d12,0,0,d22,0,0,d32,0,0,
    0,-d12-d22-d32,0,0,d12,0,0,d22,0,0,d32,0,
    0,0,-d12-d22-d32,0,0,d12,0,0,d22,0,0,d32,
    -d13-d23-d33,0,0,d13,0,0,d23,0,0,d33,0,0,
    0,-d13-d23-d33,0,0,d13,0,0,d23,0,0,d33,0,
    0,0,-d13-d23-d33,0,0,d13,0,0,d23,0,0,d33;

  Eigen::Matrix<TinyScalar, Eigen::Dynamic, Eigen::Dynamic> MuAiTAi = mMu*mVol*((Ai.transpose())*Ai);
  int idx[4] = {mi0,mi1,mi2,mi3};

  //MuAiT --- 12x12 matrix
  for(int i =0;i<4;i++)
  {
    for(int j=0;j<4;j++)
    {
      //MuAiTAi.block [i,j] -- 3x3 matrix
      L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*idx[j]+0, MuAiTAi(3*i+0, 3*j+0)));
      L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*idx[j]+1, MuAiTAi(3*i+0, 3*j+1)));
      L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*idx[j]+2, MuAiTAi(3*i+0, 3*j+2)));
      L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*idx[j]+0, MuAiTAi(3*i+1, 3*j+0)));
      L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*idx[j]+1, MuAiTAi(3*i+1, 3*j+1)));
      L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*idx[j]+2, MuAiTAi(3*i+1, 3*j+2)));
      L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*idx[j]+0, MuAiTAi(3*i+2, 3*j+0)));
      L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*idx[j]+1, MuAiTAi(3*i+2, 3*j+1)));
      L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*idx[j]+2, MuAiTAi(3*i+2, 3*j+2)));
    }
  }

  /* Poisson-weighted second pass onto the same (row, col) positions. */
  MuAiTAi = (MuAiTAi*mPoissonRatio).eval();
  for(int i =0;i<4;i++)
  {
    for(int j=0;j<4;j++)
    {
      //MuAiTAi.block [i,j] -- 3x3 matrix
      L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*idx[j]+0, MuAiTAi(3*i+0, 3*j+0)));
      L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*idx[j]+1, MuAiTAi(3*i+0, 3*j+1)));
      L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+0, 3*idx[j]+2, MuAiTAi(3*i+0, 3*j+2)));
      L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*idx[j]+0, MuAiTAi(3*i+1, 3*j+0)));
      L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*idx[j]+1, MuAiTAi(3*i+1, 3*j+1)));
      L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+1, 3*idx[j]+2, MuAiTAi(3*i+1, 3*j+2)));
      L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*idx[j]+0, MuAiTAi(3*i+2, 3*j+0)));
      L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*idx[j]+1, MuAiTAi(3*i+2, 3*j+1)));
      L_triplets.push_back(Eigen::Triplet<TinyScalar>(3*idx[i]+2, 3*idx[j]+2, MuAiTAi(3*i+2, 3*j+2)));
    }
  }
}

/* Computes this element's two 3x3 projection targets from the current
   positions x:
     md        — the rotation part mR of the deformation gradient, with the
                 third column flipped when det(mF) < 0 so the target stays a
                 proper rotation (avoids reflections);
     md_volume — a volume-corrected target mU*diag(S+D)*mV^T, where D is
                 obtained by 5 fixed-point steps driving prod(S+D) -> 1
                 (CD is the constraint value, gradCD its gradient).
   Assumes ComputeF(x) fills mF, mR, mU, mD, mV (SVD/polar data of F) —
   TODO confirm: those members are set elsewhere in this class. */
template <typename TinyScalar, typename TinyConstants>
void CorotateFEMConstraint<TinyScalar, TinyConstants>::
EvaluateDVector(const Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& x)
{
  ComputeF(x);

  md = mR;
  if(mF.determinant()<0)
    md.template block<3,1>(0,2) = -mR.template block<3,1>(0,2);

  Eigen::Matrix<TinyScalar, 3, 1> S = mD.diagonal();
  Eigen::Matrix<TinyScalar, 3, 1> D;
  D.setZero();
  TinyScalar CD;
  for(int i=0;i<5;i++)
  {
    /* CD = prod(S+D) - 1 : deviation from unit volume. */
    CD = (S[0]+D[0])*(S[1]+D[1])*(S[2]+D[2])-1;
    Eigen::Matrix<TinyScalar, 3, 1> gradCD(
      (S[1]+D[1])*(S[2]+D[2]),
      (S[0]+D[0])*(S[2]+D[2]),
      (S[0]+D[0])*(S[1]+D[1]));
    /* Project D onto the linearized constraint (Gauss-Newton-style step). */
    D = (gradCD.dot(D) -CD)/(gradCD.squaredNorm())*gradCD;
  }
  md_volume = mU*((S+D).asDiagonal())*mV.transpose();
}

/* Copies the two 3x3 targets (md, then md_volume) column-by-column into the
   global d vector starting at block `index`; advances the caller's index by
   6 blocks (note: `index` is a reference here, unlike EvaluateJMatrix). */
template <typename TinyScalar, typename TinyConstants>
void CorotateFEMConstraint<TinyScalar, TinyConstants>::
GetDVector(int& index,Eigen::Matrix<TinyScalar, Eigen::Dynamic, 1>& d)
{
  d.template block<3,1>(3*(index+0),0) = md.template block<3,1>(0,0);
  d.template block<3,1>(3*(index+1),0) = md.template block<3,1>(0,1);
  d.template block<3,1>(3*(index+2),0) = md.template block<3,1>(0,2);
  index+=3;
  d.template block<3,1>(3*(index+0),0) = md_volume.template block<3,1>(0,0);
  d.template block<3,1>(3*(index+1),0) = md_volume.template block<3,1>(0,1);
  d.template block<3,1>(3*(index+2),0) = md_volume.template block<3,1>(0,2);
  index+=3;
}

#undef EPS
};
#endif
ParSHUM_schur_matrix.c
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <strings.h>
#include <unistd.h>
#include <math.h>
#include <omp.h>

#include "ParSHUM_auxiliary.h"
#include "ParSHUM_schur_matrix.h"

/* Target number of columns handled per thread in the singleton search. */
#define NB_PER_THREAD 2000

/* Allocates a zero-initialized Schur-matrix handle; fields are filled in by
   ParSHUM_schur_matrix_allocate(). */
ParSHUM_schur_matrix
ParSHUM_schur_matrix_create()
{
  ParSHUM_schur_matrix self = calloc((size_t) 1, sizeof(*self));
  return self;
}

/* Sizes the n x m Schur matrix: allocates the per-column (CSC) and per-row
   (CSR) headers, one internal memory pool large enough for nnz entries plus
   the requested slack, per-thread scratch arrays, and one OpenMP lock per
   row and per column. */
void
ParSHUM_schur_matrix_allocate(ParSHUM_schur_matrix self, int n, int m, long nnz, int debug,
                              ParSHUM_verbose verbose, int nb_threads,
                              double extra_space, double extra_space_inbetween)
{
  int i, larger_size;

  self->nb_threads = nb_threads;
  self->n = n;
  self->m = m;
  self->verbose = verbose;
  larger_size = n > m ? n : m;
  self->CSC = calloc( (size_t) n, sizeof(*self->CSC));
  self->CSR = calloc( (size_t) m, sizeof(*self->CSR));
  /* NOTE(review): the field is named extra_space but stores
     extra_space_inbetween; the extra_space argument is only used in the pool
     sizing below — confirm this is intentional. */
  self->extra_space = extra_space_inbetween;
  /* Align all pool allocations on an L1 cache line. */
  self->alignment = sysconf (_SC_LEVEL1_DCACHE_LINESIZE);
  self->internal_mem = ParSHUM_internal_mem_create((size_t) nnz * (1 + extra_space + extra_space_inbetween) * (sizeof(*self->CSC[0].val) + sizeof(*self->CSC[0].row) + sizeof(*self->CSR[0].col)), (size_t) self->alignment);
  self->nnz = 0;
  self->debug = debug;
  self->data_struct = malloc((size_t) nb_threads * sizeof(*self->data_struct));
  for( i = 0; i < nb_threads; i++)
    self->data_struct[i] = calloc((size_t) larger_size, sizeof(**self->data_struct));
  self->base = malloc((size_t) nb_threads * sizeof(*self->base));
  int_array_memset(self->base, 1, nb_threads);

  self->row_locks = malloc((size_t) m * sizeof(*self->row_locks));
  for( i = 0; i < m; i++)
    omp_init_lock(&self->row_locks[i]);
  /* NOTE(review): col_locks is sized and initialized with m (rows) here, but
     ParSHUM_schur_matrix_destroy() destroys n (columns) of them — the two
     disagree whenever n != m; one of the two should be changed. */
  self->col_locks = malloc((size_t) m * sizeof(*self->col_locks));
  for( i = 0; i < m; i++)
    omp_init_lock(&self->col_locks[i]);
}

/* Carves a column's val/row storage (nb_elem capacity, padded up to the
   alignment granule) out of the internal pool; val and row share one
   allocation, with row starting right after the nb_elem doubles. */
void
ParSHUM_CSC_alloc(ParSHUM_internal_mem mem, CSC_struct *CSC, int nb_elem, long alignment)
{
  double *tmp;
  int part = (int) alignment / sizeof(CSC->row);
  /* NOTE(review): the padding uses `nb_elem % sizeof(CSC->row)` while the
     test uses `nb_elem % part`; rounding up to a multiple of `part` would be
     `nb_elem += part - nb_elem % part`. Also sizeof(CSC->row) is the size of
     the POINTER, not of an element (*CSC->row) — confirm intent. */
  if (nb_elem % part)
    nb_elem += (part - nb_elem % sizeof(CSC->row)) ;
  ParSHUM_internal_mem_alloc(mem, (void **) &CSC->val, (size_t) nb_elem * (sizeof(CSC->val) + sizeof(CSC->row)) );
  tmp = &CSC->val[nb_elem];
  CSC->row = (int *) tmp;
  CSC->nb_elem = 0;
  CSC->nb_free = nb_elem;
}

/* Same as ParSHUM_CSC_alloc but for a row's column-index array only
   (rows store no values). Shares the padding caveat noted above. */
void
ParSHUM_CSR_alloc(ParSHUM_internal_mem mem, CSR_struct *CSR, int nb_elem, long alignment)
{
  int part = (int) alignment / sizeof(CSR->col);
  if (nb_elem % part)
    nb_elem += (part - nb_elem % sizeof(CSR->col));
  ParSHUM_internal_mem_alloc(mem, (void **) &CSR->col, nb_elem * sizeof(CSR->col) );
  CSR->nb_elem = 0;
  CSR->nb_free = nb_elem;
}

/* Finds row and column singletons in the active (not yet pivoted) part of
   the matrix and promotes them to pivots.  rows/cols are the candidate index
   arrays and are compacted in place (already-pivoted entries removed);
   workspace supplies one scratch buffer per thread.  Results are returned
   through nb_row_singletons / nb_col_singletons and the four perm arrays.
   NOTE(review): the `distributions` parameter is never used in this body. */
void
ParSHUM_schur_get_singletons(ParSHUM_schur_matrix self, int done_pivots, int previous_step_pivots, double val_tol,
                             int *nb_col_singletons, int *nb_row_singletons,
                             int *cols, int *rows, int *distributions, int nb_BB_cols,
                             int *col_perm, int *row_perm, int *invr_col_perm, int *invr_row_perm,
                             void **workspace)
{
  /* Active sub-matrix dimensions (BB columns excluded from n). */
  int n = self->n - done_pivots + previous_step_pivots - nb_BB_cols;
  int m = self->m - done_pivots + previous_step_pivots;
  int i, _done_pivots = done_pivots;
  int needed_pivots = self->n < self->m ? self->n : self->m;
  needed_pivots -= done_pivots;
  int nb_threads = self->nb_threads;
  int nb_threads_ = self->nb_threads;
  int _nb_col_singletons = 0, _nb_row_singletons = 0;
  /* sizes_* track the compacted per-thread segment ends; original_sizes_*
     keep the initial even partition for hole-filling later. */
  int sizes_m[nb_threads+1], original_sizes_m[nb_threads+1];
  int sizes_n[nb_threads+1], original_sizes_n[nb_threads+1];
  int local_nb_sing[nb_threads];
  int part_m = m / nb_threads;
  int part_n = n / nb_threads;

  /* Shrink the thread count when there is too little work per thread. */
  if ( nb_threads * NB_PER_THREAD > n) {
    nb_threads = nb_threads_ = n / NB_PER_THREAD;
    nb_threads = nb_threads_ = !nb_threads ?
1 : nb_threads;
  }

  /* Even partition of the row and column candidate arrays per thread. */
  for( i = 0; i < nb_threads; i++) {
    sizes_m[i] = original_sizes_m[i] = i * part_m;
    sizes_n[i] = original_sizes_n[i] = i * part_n;
  }
  sizes_m[nb_threads] = original_sizes_m[nb_threads] = m;
  sizes_n[nb_threads] = original_sizes_n[nb_threads] = n;

#pragma omp parallel num_threads(nb_threads) shared(self, rows, cols, row_perm, col_perm, invr_row_perm, invr_col_perm, done_pivots, _done_pivots, needed_pivots, nb_threads, nb_threads_, _nb_row_singletons, _nb_col_singletons, local_nb_sing, workspace, sizes_m, original_sizes_m, sizes_n, original_sizes_n, val_tol, nb_BB_cols) default(none) //proc_bind(spread)
  {
    int j;
    int me = omp_get_thread_num();
    int start = original_sizes_m[me];
    int end = original_sizes_m[me+1];
    int *row_singeltons = (int *) workspace[me];
    int nb_singeltons = 0;

    /* Pass 1 (rows): drop already-pivoted rows from this thread's segment
       (swap-with-last compaction) and collect rows with a single entry. */
    for(j = start; j < end; ) {
      int row = rows[j];
      if (invr_row_perm[rows[j]] != ParSHUM_UNUSED_PIVOT) {
        rows[j] = rows[--end];
        continue;
      }
      if ( self->CSR[row].nb_elem == 1 )
        row_singeltons[nb_singeltons++] = row;
      j++;
    }
    sizes_m[me+1] = end - start ;
    local_nb_sing[me] = nb_singeltons;
#pragma omp barrier
    /* Each thread writes its singleton pivots into a disjoint slice of the
       permutation arrays (prefix sum of local_nb_sing gives the offset). */
    int nb_elem = local_nb_sing[me];
    int perm_place = 0;
    for ( j = 0; j < me; j++)
      perm_place += local_nb_sing[j];
    perm_place += _done_pivots;
    for ( j = 0; j < nb_elem; j++) {
      int row = row_singeltons[j];
      int col = *self->CSR[row].col;
      CSC_struct *CSC = &self->CSC[col];
      double *vals = CSC->val;
      int *rows = CSC->row;     /* shadows the parameter within this scope */
      int col_nb_elem = CSC->nb_elem;
      int d, tmp_int;
      double tmp_dbl;
      /* Move the pivot entry to the last slot of its column. */
      for ( d = 0; d < col_nb_elem; d++)
        if ( rows[d] == row)
          break;
      tmp_int = rows[d];
      rows[d] = rows[col_nb_elem-1];
      rows[col_nb_elem-1] = tmp_int;
      tmp_dbl = vals[d];
      vals[d] = vals[col_nb_elem-1];
      vals[col_nb_elem-1] = tmp_dbl;
      int next_pivot = perm_place + j;
      col_perm[next_pivot] = col;
      invr_col_perm[col] = next_pivot;
      row_perm[next_pivot] = row;
      invr_row_perm[row] = next_pivot;
    }
#pragma omp atomic
    done_pivots += nb_elem;
#pragma omp atomic
    _nb_row_singletons += nb_elem;
    /* NOTE: the original kept a commented-out verification pass here (inside
       an "#pragma omp single") that re-validated the row-singleton pivots and
       compacted duplicates out of row_perm/col_perm; it is dead code and
       remains disabled. */
#pragma omp single
    {
      /* Turn per-thread segment lengths into global end offsets, then move
         data from the tail segments into the holes left by compaction so the
         rows array becomes one contiguous run. */
      for ( j = 1; j <= nb_threads_; j++)
        sizes_m[j] += original_sizes_m[j-1];
      start = 1;
      end = nb_threads_;
      /* This loop takes care to fill in all the holes in the row array. */
      while (start < end ) {
        int hole_size = 0;
        if ( sizes_m[start] < original_sizes_m[start]) {
          hole_size = original_sizes_m[start] - sizes_m[start];
        } else {
          start++;
          continue;
        }
        int avail_size = 0;
        if (sizes_m[end] > original_sizes_m[end - 1]) {
          avail_size = sizes_m[end] - original_sizes_m[end - 1] ;
        } else {
          end--;
          nb_threads_--;
          continue;
        }
        size_t size;
        int *source;
        int *dst;
        dst = &rows[sizes_m[start]];
        if (avail_size > hole_size) {
          size = hole_size * sizeof(*rows);
          source = &rows[sizes_m[end] - hole_size];
          sizes_m[start++] += hole_size;
          sizes_m[end ] -= hole_size;
        } else if (avail_size < hole_size) {
          size = avail_size * sizeof(*rows);
          source = &rows[sizes_m[end] - avail_size];
          if (start == end - 1 ) {
            sizes_m[start] += avail_size;
            sizes_m[end--] = sizes_m[start];
            nb_threads_--;
          } else {
            sizes_m[start] += avail_size;
            sizes_m[end--] -= avail_size;
            nb_threads_--;
          }
        } else {
          size = avail_size * sizeof(*rows);
          source = &rows[sizes_m[end] - avail_size];
          sizes_m[start++] += avail_size;
          sizes_m[end-- ] -= avail_size;
          nb_threads_--;
        }
        /* The actual copy runs as a task; src/dst ranges are disjoint by
           construction (hole is before the donor segment's tail). */
#pragma omp task firstprivate(dst, source, size)
        {
          memcpy(dst, source, size);
        }
      }
      if (sizes_m[nb_threads_] != self->m - _done_pivots)
        ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, "not all the rows are taken out from rows");
    }

#pragma omp barrier
    /* Pass 2 (columns): same compaction + singleton collection on cols. */
    start = original_sizes_n[me];
    end = original_sizes_n[me+1];
    int *col_singeltons = (int *) workspace[me];
    nb_singeltons = 0;
    for(j = start; j < end; ) {
      int col = cols[j];
      if (invr_col_perm[col] != ParSHUM_UNUSED_PIVOT && invr_col_perm[col] < _done_pivots) {
        cols[j] = cols[--end];
        continue;
      }
      if (self->CSC[col].nb_elem == 1)
        col_singeltons[nb_singeltons++] = col;
      j++;
    }
    sizes_n[me+1] = end - start ;
    local_nb_sing[me] = nb_singeltons;
#pragma omp barrier
#pragma omp single
    {
      /* Same hole-filling compaction as above, on the cols array. */
      nb_threads_ = nb_threads;
      for ( j = 1; j <= nb_threads_; j++)
        sizes_n[j] += original_sizes_n[j-1];
      start = 1;
      end = nb_threads_;
      while (start < end ) {
        int hole_size = 0;
        if ( sizes_n[start] < original_sizes_n[start]) {
          hole_size = original_sizes_n[start] - sizes_n[start];
        } else {
          start++;
          continue;
        }
        int avail_size = 0;
        if (sizes_n[end] > original_sizes_n[end - 1]) {
          avail_size = sizes_n[end] - original_sizes_n[end - 1] ;
        } else {
          end--;
          nb_threads_--;
          continue;
        }
        size_t size;
        int *source;
        int *dst;
        dst = &cols[sizes_n[start]];
        if (avail_size > hole_size) {
          size = hole_size * sizeof(*cols);
          source = &cols[sizes_n[end] - hole_size];
          sizes_n[start++] += hole_size;
          sizes_n[end ] -= hole_size;
        } else if (avail_size < hole_size) {
          size = avail_size * sizeof(*cols);
          source = &cols[sizes_n[end] - avail_size];
          if (start == end - 1 ) {
            sizes_n[start] += avail_size;
            sizes_n[end--] = sizes_n[start];
            nb_threads_--;
          } else {
            sizes_n[start] += avail_size;
            sizes_n[end--] -= avail_size;
            nb_threads_--;
          }
        } else {
          size = avail_size * sizeof(*cols);
          source = &cols[sizes_n[end] - avail_size];
          sizes_n[start++] += avail_size;
          sizes_n[end-- ] -= avail_size;
          nb_threads_--;
        }
#pragma omp task firstprivate(dst, source, size)
        {
          memcpy(dst, source, size);
        }
      }
      if (sizes_n[nb_threads_] != self->n - _done_pivots - nb_BB_cols)
        ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, "not all the cols are taken out from cols");
    }

    /* Column singletons are committed sequentially: their rows may collide
       with each other and with the row singletons, so the checks below must
       see a consistent invr_row_perm. */
#pragma omp single
    {
      int k;
      for (j = 0; j < nb_threads; j++ ) {
        int *col_singeltons = (int *) workspace[j];
        int nb_elem = local_nb_sing[j];
        for ( k = 0; k < nb_elem; k++) {
          int col = col_singeltons[k];
          int row = *self->CSC[col].row;
          if ( (_nb_row_singletons + _nb_col_singletons) < needed_pivots &&
               invr_row_perm[row] == ParSHUM_UNUSED_PIVOT) {
            int next_pivot = done_pivots + _nb_col_singletons++;
            col_perm[next_pivot] = col;
            invr_col_perm[col] = next_pivot;
            row_perm[next_pivot] = row;
            invr_row_perm[row] = next_pivot;
          }
        }
      }
    }
  }

  *nb_col_singletons = _nb_col_singletons;
  *nb_row_singletons = _nb_row_singletons;
}

/* Pre-allocates every column's CSC storage from the input column pointers and
   every row's CSR storage from row_sizes, each inflated by extra_space. */
void
ParSHUM_schur_matrix_init_ptr(ParSHUM_schur_matrix self, long *col_ptr, int *row_sizes)
{
  int i;
  int n = self->n;
  int m = self->m;
  ParSHUM_internal_mem memory = self->internal_mem;

  // handling the CSC part
  /* NOTE(review): the (int) cast binds to (1 + self->extra_space) only, so a
     fractional slack factor (e.g. 1.5) truncates to 1 BEFORE the multiply —
     likely intended as (int)((1 + extra_space) * size); confirm. */
  for(i = 0; i < n; i++)
    ParSHUM_CSC_alloc(memory, &self->CSC[i], (int) (1 + self->extra_space) * (col_ptr[i+1] - col_ptr[i]), self->alignment);

  // handling the CSR part (same cast caveat as above)
  for(i = 0; i < m; i++)
    ParSHUM_CSR_alloc(memory, &self->CSR[i], (int) (1 + self->extra_space) * row_sizes[i], self->alignment);
}

/* Recomputes a column's max |value| and partitions the column in place so
   the entries with |v| >= col_max * value_tol (numerically eligible pivot
   candidates) come first; nb_numerical_eligible records their count. */
void
ParSHUM_CSC_update_col_max(CSC_struct *CSC, double value_tol)
{
  int i;
  int nb_elem = CSC->nb_elem;
  double max = 0.0;
  double *vals = CSC->val;
  int *rows = CSC->row;

  for(i = 0; i < nb_elem; i++) {
    double tmp = fabs(vals[i]);
    if (tmp > max)
      max = tmp;
  }
  CSC->col_max = max;
  max *= value_tol;  /* threshold below which entries are ineligible */

  /* Swap sub-threshold entries to the tail; loop index i ends at the
     eligible count. */
  for(i = 0; i < nb_elem; ) {
    double val = vals[i];
    if ( fabs(vals[i]) < max ) {
      int row = rows[i];
      vals[i] = vals[--nb_elem];
      vals[nb_elem] = val;
      rows[i] = rows[nb_elem];
      rows[nb_elem] = row;
    } else {
      i++;
    }
  }
  CSC->nb_numerical_eligible = i;
}

/* Loads the input matrix A (CSC layout) into the Schur structure: values and
   row indices go into the CSC side, column indices are scattered into the
   CSR side, and each column's eligibility partition is computed. */
void
ParSHUM_schur_matrix_copy(ParSHUM_matrix A, ParSHUM_schur_matrix self, double value_tol)
{
  int i, j;
  int *row_sizes;
  int n = self->n;

  row_sizes = ParSHUM_matrix_rows_sizes(A);
  ParSHUM_schur_matrix_init_ptr(self,
A->col_ptr, row_sizes);
  free(row_sizes);
  self->nnz = A->nnz;

  for( i = 0; i < n; i++) {
    long A_col_start = A->col_ptr[i];
    long A_col_end = A->col_ptr[i+1];
    long col_length = A_col_end - A_col_start;
    double *CSC_vals = self->CSC[i].val;
    int *CSC_rows = self->CSC[i].row;

    // handle the copy of the column into the CSC structure
    memcpy((void *) CSC_rows, (void *) &A->row[A_col_start], col_length * sizeof(*A->row));
    memcpy((void *) CSC_vals, (void *) &A->val[A_col_start], col_length * sizeof(*A->val));
    self->CSC[i].nb_elem += col_length;
    self->CSC[i].nb_free -= col_length;
    ParSHUM_CSC_update_col_max(&self->CSC[i], value_tol);

    // handle the copy of the column into the CSR structure
    /* NOTE(review): j is int while A_col_start/A_col_end are long — this
       truncates for matrices with more than INT_MAX nonzeros; confirm the
       supported problem sizes. */
    for(j = A_col_start; j < A_col_end; j++) {
      int row = A->row[j];
      CSR_struct *CSR = &self->CSR[row];
      int *CSR_cols = CSR->col;
      CSR_cols[CSR->nb_elem++] = i;
      CSR->nb_free--;
    }
  }
}

/* Removes entry (row, col) from the CSR side only (swap-with-last).
   Fatal error if the entry is absent; warns (and probes cols[0] even when
   nb_elem == 0) if the row is already empty. Not thread-safe by itself —
   callers take self->row_locks[row]. */
void
delete_entry_from_CSR(ParSHUM_schur_matrix self, int col, int row)
{
  CSR_struct *CSR;
  int i, nb_elem, found, *cols;

  CSR = &self->CSR[row];
  cols = CSR->col;
  nb_elem = CSR->nb_elem;
  found = 0;
  if(nb_elem < 1) {
    ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__,"tring to delete an entry in CSR with zero elems");
    if(cols[0] == col)
      ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__,"even better, the entry is there!");
  }
  for(i = 0; i < nb_elem; i++)
    if (cols[i] == col) {
      found = 1;
      break;
    }
  if ( !found )
    ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__,"tring to delete an non existing entry in CSR");
  CSR->nb_elem--;
  CSR->nb_free++;
  cols[i] = cols[CSR->nb_elem];
}

/* Removes entry (row, col) from the CSC side (swap-with-last) and returns
   its value. Fatal error if the entry does not exist. Callers guard with
   self->col_locks[col]. */
double
delete_entry_from_CSC(ParSHUM_schur_matrix self, int col, int row)
{
  CSC_struct *CSC;
  int i, nb_elem, found, *rows;
  double *vals, return_val = NAN;

  CSC = &self->CSC[col];
  rows = CSC->row;
  vals = CSC->val;
  nb_elem = CSC->nb_elem;
  found = 0;
  for(i = 0; i < nb_elem; i++)
    if (rows[i] == row) {
      found = 1;
      return_val = vals[i];
      break;
    }
  if ( !found )
    ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__,"tring to delete an non existing entry in CSC");
  CSC->nb_elem--;
  CSC->nb_free++;
  rows[i] = rows[CSC->nb_elem];
  vals[i] = vals[CSC->nb_elem];
  return return_val;
}

/* Removes (row, col) from both representations; returns the value. */
double
delete_entry(ParSHUM_schur_matrix self, int col, int row)
{
  delete_entry_from_CSR(self, col, row);
  return delete_entry_from_CSC(self, col, row);
}

/* Eliminates nb_pivots column-singleton pivots: each pivot column is moved
   into L (scaled by 1/pivot), the pivot value goes into D, and the column's
   entries are deleted from the CSR side under row locks. L's column
   pointers are grown sequentially first, then columns are filled in
   parallel (thread t handles pivots t, t+nb_threads, ...). */
void
ParSHUM_schur_matrix_update_LD_singeltons(ParSHUM_schur_matrix self, ParSHUM_matrix L, ParSHUM_matrix D,
                                          int *row_perm, int *col_perm, int *invr_col_perm, int nb_pivots)
{
  int pivot, nb_threads = self->nb_threads;
  int nb_steps = (nb_pivots + nb_threads - 1 ) / nb_threads, step;
  int L_input_size = L->n;
  int D_input_size = D->n;

  if ( D->n + nb_pivots > D->allocated )
    ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "not enought memory in D matrix. this should never happen, so something went wrong");

  /* Sequential pass: reserve one L column per pivot (realloc if needed). */
  for(pivot = 0; pivot < nb_pivots; pivot++) {
    int nb_elem = self->CSC[col_perm[pivot]].nb_elem - 1;
    int new_col_ptr = L->col_ptr[L->n] + nb_elem;
    if ( new_col_ptr > L->allocated)
      ParSHUM_matrix_realloc(L);
    L->n++;
    L->col_ptr[L->n] = new_col_ptr;
    L->nnz += nb_elem;
  }
  D->n += nb_pivots;

#pragma omp parallel num_threads(nb_threads) shared(L_input_size, D_input_size) firstprivate(nb_threads, nb_pivots, col_perm, row_perm, self, L, D, nb_steps) private(step) default(none) //proc_bind(spread)
  {
    long S_nnz = 0;  /* entries this thread removed from the Schur matrix */
    int me = omp_get_thread_num();
    for(step = 0; step < nb_steps; step++) {
      int current_pivot = step * nb_threads + me;
      if ( current_pivot < nb_pivots) {
        CSC_struct *CSC;
        int i, nb_elem, L_current_col, row, col;
        int *rows, *L_rows;
        double *vals, *L_vals, pivot_val = NAN;

        col = col_perm[current_pivot];
        row = row_perm[current_pivot];
        CSC = &self->CSC[col];
        nb_elem = CSC->nb_elem;
        vals = CSC->val;
        rows = CSC->row;
        L_current_col = L->col_ptr[L_input_size + current_pivot];
        L_rows = L->row;
        L_vals = L->val;

        /* Copy all non-pivot entries into L; the pivot entry goes to D. */
        for(i = 0; i < nb_elem; i++) {
          if ( rows[i] != row) {
            L_rows[L_current_col] = rows[i];
            L_vals[L_current_col] = vals[i];
            L_current_col++;
          } else {
            D->val[D_input_size + current_pivot] = pivot_val = vals[i];
          }
omp_set_lock(&self->row_locks[rows[i]]);
          delete_entry_from_CSR(self, col, rows[i]);
          omp_unset_lock(&self->row_locks[rows[i]]);
        }
        /* TODO: we could split the previopud for in two fors: one before we found the pivot, update the begining, and then do the rest  */
        /* TODO do the delete_entru_from_CSR in a seperate loop maybe better??? try that option */
        /* Scale the new L column by the pivot. */
        for( i = L->col_ptr[L_input_size + current_pivot]; i < L->col_ptr[L_input_size + current_pivot + 1]; i++)
          L_vals[i] /= pivot_val;
        /* TODO: recycle the col's memory */
        CSC->nb_elem = 0;
        CSC->nb_free = 0;
        S_nnz += nb_elem;
      }
    }
#pragma omp atomic
    self->nnz -= S_nnz;
  }
}

/* General pivot elimination into L/U/D.  For every selected pivot column:
   the column struct is moved wholesale into L->col (the pivot value, kept in
   the column's LAST slot by the singleton search, is popped into D).
   Columns encoded with index >= n (wrapped back via %= n) get an extra
   pass that peels entries belonging to already-pivoted rows out into U —
   presumably the BB-column case; TODO confirm the >= n encoding. */
void
ParSHUM_schur_matrix_update_LD(ParSHUM_schur_matrix self, ParSHUM_L_matrix L, ParSHUM_U_matrix U, ParSHUM_matrix D,
                               int *row_perm, int *col_perm, int nb_pivots, int *invr_row_perm,
                               int nb_row_singeltons, int nb_col_singeltons, void **workspace)
{
  int nb_threads = self->nb_threads;
  long S_nnz = 0;
  long L_nnz = 0;
  int L_input_size = L->n;
  int D_input_size = D->n;

  if ( D->n + nb_pivots > D->allocated )
    ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "not enought memory in D matrix. this should never happen, so something went wrong");
  D->n += nb_pivots;
  L->n += nb_pivots;

#pragma omp parallel for num_threads(nb_threads) shared(nb_pivots,L_input_size,D_input_size,nb_row_singeltons) firstprivate(self, workspace, col_perm, L, D, U, invr_row_perm) reduction(+:S_nnz) reduction(+:L_nnz) default(none) //proc_bind(spread)
  for( int current_pivot = 0; current_pivot < nb_pivots; current_pivot++) {
    /* TODO: ParSHUM_verbose_trace_start_event(verbose, ParSHUM_UPDATE_L); */
    int me = omp_get_thread_num();
    int n = self->n;
    int m = self->m;
    /* Per-thread scratch: m ints for U rows, then doubles for U values. */
    int *U_rows = (int *) workspace[me];
    int *tmp = &U_rows[m];
    double *U_vals = (double *) tmp;
    CSC_struct *CSC;
    int i, nb_elem = 0, col;
    int L_indice = L_input_size + current_pivot;
    int *rows;
    double *vals, *L_vals, pivot_val = NAN;

    col = col_perm[current_pivot];
    if ( col < n) {
      /* Regular column: hand the whole column struct to L and pop the pivot
         (last slot) into D. */
      CSC = &self->CSC[col];
      L->col[L_indice] = *CSC;
      vals = CSC->val;
      S_nnz += CSC->nb_elem;
      D->val[D_input_size + current_pivot] = pivot_val = vals[--L->col[L_indice].nb_elem];
    } else {
      /* Encoded column (>= n): unwrap the index, then split the remaining
         entries between U (rows already pivoted) and L (still active). */
      col_perm[current_pivot] %= n;
      col = col_perm[current_pivot];
      CSC = &self->CSC[col];
      L->col[L_indice] = *CSC;
      nb_elem = CSC->nb_elem - 1;
      vals = CSC->val;
      rows = CSC->row;
      S_nnz += CSC->nb_elem;
      D->val[D_input_size + current_pivot] = pivot_val = vals[--L->col[L_indice].nb_elem];
      U_col *U_col = &U->col[col];
      int U_col_new = 0;
      for(i = 0; i < nb_elem; ) {
        int tmp_row = rows[i];
        if ( invr_row_perm[tmp_row] != ParSHUM_UNUSED_PIVOT ) {
          U_rows[U_col_new] = tmp_row;
          U_vals[U_col_new++] = vals[i];
          vals[i] = vals[--nb_elem];
          rows[i] = rows[ nb_elem];
        } else {
          i++;
        }
      }
      L->col[L_indice].nb_elem = nb_elem;
      while(U_col->allocated - U_col->nb_elem < U_col_new)
        ParSHUM_U_col_realloc(U_col);
      memcpy(&U_col->val[U_col->nb_elem], U_vals, U_col_new * sizeof(*U_vals));
      memcpy(&U_col->row[U_col->nb_elem], U_rows, U_col_new * sizeof(*U_rows));
      U_col->nb_elem += U_col_new;
    }
    L_vals = L->col[L_indice].val;
    /* NOTE(review): in the col < n branch nb_elem is still 0 at this point,
       so L_nnz only counts entries from the encoded-column branch (and the
       reduction result is never read here) — confirm whether L_nnz should
       use L->col[L_indice].nb_elem instead. */
    L_nnz += nb_elem;
    nb_elem = L->col[L_indice].nb_elem;
    for( i = 0; i < nb_elem; i++)
      L_vals[i] /= pivot_val;
    /* TODO: recycle the col's memory */
    CSC->nb_elem = 0;
    CSC->nb_free = 0;
  }
  /* TODO: ParSHUM_verbose_trace_stop_event(verbose); */
  self->nnz -= S_nnz;
  L->nnz += S_nnz;
}

/* Eliminates row-singleton pivots (pivot column has exactly one entry):
   the pivot value goes into D, its row is scattered into U column-by-column
   (under per-column and per-U-column locks), and empty L columns are
   appended (all sharing the current col_ptr, i.e. zero length). */
void
ParSHUM_schur_matrix_update_U_singletons(ParSHUM_schur_matrix S, ParSHUM_U_matrix U, ParSHUM_matrix D, ParSHUM_matrix L,
                                         int nb_pivots, int *col_perm, int *row_perm)
{
  int nb_threads = S->nb_threads, d, sthg = L->col_ptr[L->n];
  int nb_steps = ( nb_pivots + nb_threads -1 ) / nb_threads, step;

  if ( D->n + nb_pivots > D->allocated )
    ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "not enought memory in D matrix. this should never happen, so something went wrong");

  /* Append nb_pivots empty columns to L. */
  for ( d = 0; d < nb_pivots; d++) {
    L->n++;
    L->col_ptr[L->n] = sthg;
  }

#pragma omp parallel num_threads(nb_threads) firstprivate(nb_threads, nb_pivots, col_perm, row_perm, S,D,U, nb_steps) private(step) default(none) //proc_bind(spread)
  {
    int me = omp_get_thread_num();
    for ( step = 0; step < nb_steps; step++) {
      int current_pivot = step * nb_threads + me;
      if ( current_pivot < nb_pivots) {
        CSC_struct *CSC;
        CSR_struct *CSR;
        int col, row, D_indice, row_n, i;
        int *row_cols;

        col = col_perm[current_pivot];
        row = row_perm[current_pivot];
        CSC = &S->CSC[col];
        CSR = &S->CSR[row];
        row_cols = CSR->col;
        /* Sanity checks: the column must still be a singleton whose only
           entry is the chosen pivot row. */
        if (CSC->nb_elem != 1) {
          printf("nb_elem = %d\n", CSC->nb_elem);
          ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "The pivot is not row singelton");
        }
        if (CSC->row[0] != row)
          ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "The pivot is not the same as before");
        /* NOTE(review): the atomic fetch-add on D->n is commented out, so
           every pivot writes D->val[0] and D->n is never advanced here —
           this looks like disabled/broken code; confirm against callers. */
        D_indice = 0;//__atomic_fetch_add(&D->n, 1, __ATOMIC_SEQ_CST);
        D->val[D_indice] = CSC->val[0];
        delete_entry_from_CSR(S, col, row);
        row_n = CSR->nb_elem;
        /* Move every remaining entry of the pivot row into U. */
        for ( i = 0; i < row_n; i++) {
          int col1 = row_cols[i];
          U_col *u_col = &U->col[col1];
          double val;
          omp_set_lock(&S->col_locks[col1]);
          val = delete_entry_from_CSC(S, col1, row);
          omp_unset_lock(&S->col_locks[col1]);
          omp_set_lock(&u_col->lock);
          if (u_col->nb_elem == u_col->allocated)
            ParSHUM_U_col_realloc(u_col);
u_col->row[u_col->nb_elem] = row; u_col->val[u_col->nb_elem] = val; u_col->nb_elem++; omp_unset_lock(&u_col->lock); } CSR->nb_elem = 0; CSR->nb_free = 0; CSC->nb_elem = 0; CSC->nb_free = 0; #pragma omp atomic S->nnz -= row_n + 1; #pragma omp atomic U->nnz += row_n; } } } } void ParSHUM_schur_matrix_update_U(ParSHUM_schur_matrix S, ParSHUM_U_matrix U, ParSHUM_matrix L, int nb_pivots, int *row_perm, ParSHUM_U_struct *U_struct, int U_new_n, int U_new_nnz) { int pivot, i, j; int nb_threads = S->nb_threads; int indices[nb_threads+1]; int nnz_part = U_new_nnz / nb_threads; if ( nb_pivots <= nb_threads) { nb_threads = nb_pivots; for( i = 0; i <= nb_threads; i++) indices[i] = i; } else { *indices = 0; for ( i = 1, j = 0; i < nb_threads; i++) { int part = 0; while ( part < nnz_part && j < nb_pivots ) part += S->CSR[row_perm[j++]].nb_elem; indices[i] = j; } indices[nb_threads] = nb_pivots; } for( i = 0; i < U_new_n; i++) { int col = U_struct[i].col; int nb_elem = U_struct[i].nb_elem; U_col *u_col = &U->col[col]; u_col->cost = 0; while(u_col->allocated - u_col->nb_elem < nb_elem) ParSHUM_U_col_realloc(u_col); } U->nnz += U_new_nnz; #pragma omp parallel num_threads(nb_threads) private(pivot, i) { int me = omp_get_thread_num(); long S_nnz = 0; for( pivot = indices[me]; pivot < indices[me+1]; pivot++) { int row = row_perm[pivot]; CSR_struct *CSR = &S->CSR[row]; int row_nb_elem = CSR->nb_elem; int *cols = CSR->col; for( i = 0; i < row_nb_elem; i++) { int current_col = cols[i]; U_col *u_col = &U->col[current_col]; int indice = 0;//__atomic_fetch_add(&u_col->nb_elem, 1, __ATOMIC_SEQ_CST); u_col->row[indice] = row; } CSR->nb_elem = 0; CSR->nb_free = 0; S_nnz += row_nb_elem; } #pragma omp atomic S->nnz -= S_nnz; } } void ParSHUM_schur_matrix_add_to_entry(ParSHUM_schur_matrix self, int row, int col, double val) { CSC_struct *CSC = &self->CSC[col]; int i; int nb_elem = CSC->nb_elem; int *rows = CSC->row; double *vals = CSC->val; for( i = 0; i < nb_elem; i++) if ( rows[i] == row) { 
#pragma omp atomic vals[i] += val; return; } ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "entry not found"); } void ParSHUM_CSC_update_col_max_array(CSC_struct *CSC, double *vals, int *rows, int nb_elem, double value_tol) { int i, nb_eligeble = 0, nb_ineligible = nb_elem; int *CSC_rows = CSC->row; double *CSC_vals = CSC->val; double max = 0.0; for( i = 0; i < nb_elem; i++) { double tmp = fabs(vals[i]); if (tmp > max) max = tmp; } CSC->col_max = max; max *= value_tol; for(i = 0; i < nb_elem; i++) { double val = vals[i]; if ( fabs(val) >= max ) { CSC_rows[nb_eligeble ] = rows[i]; CSC_vals[nb_eligeble++] = val; } else { CSC_rows[--nb_ineligible] = rows[i]; CSC_vals[ nb_ineligible] = val; } } CSC->nb_numerical_eligible = nb_eligeble; CSC->nb_elem = nb_elem; CSC->nb_free -= nb_elem; }; void ParSHUM_schur_matrix_update_S(ParSHUM_schur_matrix S, ParSHUM_L_matrix L, ParSHUM_U_matrix U, int *U_struct, int U_new_n, int *L_struct, int L_new_n, int *row_perms, int *invr_col_perm, int *invr_row_perm, int nb_pivots, int done_pivots, double value_tol, void **workspace) { int nb_threads = S->nb_threads; long S_new_nnz = 0; long U_new_nnz = 0; int start = done_pivots, end = done_pivots + nb_pivots; #pragma omp parallel num_threads(nb_threads) shared(S_new_nnz, U_new_nnz, workspace, U_new_n, start, end, L_new_n, ) firstprivate(S, L, U, done_pivots, U_struct, invr_row_perm, value_tol, L_struct, invr_col_perm, row_perms) default(none) //proc_bind(spread) { int me = omp_get_thread_num(); int m = S->m; int *schur_row_struct = S->data_struct[me]; int base = S->base[me]; int *tmp_rows = (int *) workspace[me]; int *tmp = &tmp_rows[m]; double *tmp_vals = (double *) tmp; #pragma omp for reduction(+:S_new_nnz) reduction(+:U_new_nnz) schedule(guided, 10) for ( int i = 0; i < U_new_n; i++) { int k, l; int col = U_struct[i]; U_col *U_col = &U->col[col]; CSC_struct *CSC = &S->CSC[col]; int S_col_nb_elem = CSC->nb_elem; int U_nb_elem = m; int S_nb_elem = 0; int *S_rows = CSC->row; double 
*S_vals = CSC->val; int *U_rows; double *U_vals; int needed_size; for ( k = 0; k < S_col_nb_elem; k++) { int S_row = S_rows[k]; if (invr_row_perm[S_row] != ParSHUM_UNUSED_PIVOT ) { tmp_rows[--U_nb_elem] = S_row; tmp_vals[ U_nb_elem] = S_vals[k]; } else { tmp_rows[S_nb_elem] = S_row; tmp_vals[S_nb_elem] = S_vals[k]; schur_row_struct[S_row] = base + S_nb_elem++; } } U_vals = &tmp_vals[U_nb_elem]; U_rows = &tmp_rows[U_nb_elem]; S_vals = tmp_vals; S_rows = tmp_rows; U_nb_elem = m - U_nb_elem; if (S_col_nb_elem != ( U_nb_elem + S_nb_elem )) ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__,"Something went wrong in the schur update"); U_new_nnz += (long) U_nb_elem; for ( l = 0; l < U_nb_elem; l++) { int L_col = invr_row_perm[U_rows[l]]; CSC_struct *L_CSC = &L->col[L_col]; double U_val = U_vals[l]; int *L_rows = L_CSC->row; double *L_vals = L_CSC->val; int L_nb_elem = L_CSC->nb_elem; for ( k = 0; k < L_nb_elem; k++) { int row = L_rows[k]; double val = -U_val * L_vals[k]; int indice = schur_row_struct[row] - base; if ( indice >= 0 ) { S_vals[indice] += val; } else { S_vals[S_nb_elem] = val; S_rows[S_nb_elem] = row; schur_row_struct[row] = base + S_nb_elem++; } } } while(U_col->allocated - U_col->nb_elem < U_nb_elem) ParSHUM_U_col_realloc(U_col); memcpy(&U_col->val[U_col->nb_elem], U_vals, U_nb_elem * sizeof(*U_vals)); memcpy(&U_col->row[U_col->nb_elem], U_rows, U_nb_elem * sizeof(*U_rows)); U_col->nb_elem += U_nb_elem; S_new_nnz += (long) S_nb_elem - (long) CSC->nb_elem; CSC->nb_free += CSC->nb_elem; CSC->nb_elem = 0; needed_size = CSC->nb_free; if (needed_size < S_nb_elem) { while( needed_size < S_nb_elem ) needed_size *= 2; ParSHUM_CSC_alloc(S->internal_mem, CSC, needed_size, S->alignment); } ParSHUM_CSC_update_col_max_array(CSC, S_vals, S_rows, S_nb_elem, value_tol); int new = base + S_nb_elem; if (new < base ) { base = 1; bzero((void *) schur_row_struct, (size_t) S->n * sizeof(*schur_row_struct)); } else { base = new; } S->base[me] = base; } // for #pragma omp for 
schedule(guided, 10) for ( int i = 0; i < L_new_n; i++) { int k, l; int *schur_row_struct = S->data_struct[me]; int base = S->base[me]; int row = L_struct[i]; CSR_struct *CSR = &S->CSR[row]; int n = S->n; int *tmp = workspace[me]; int S_row_nb_elem = CSR->nb_elem; int *S_cols = CSR->col; int *L_cols; int S_nb_elem = 0 , L_nb_elem = n ; int needed_size; /* constructing schur_row_struct and discovering the vals of U */ for (k = 0; k < S_row_nb_elem; k++) { int S_col = S_cols[k]; if (invr_col_perm[S_col] != ParSHUM_UNUSED_PIVOT ) { tmp[--L_nb_elem] = S_col; } else { tmp[S_nb_elem] = S_col; schur_row_struct[S_col] = base + S_nb_elem++; } } L_cols = &tmp[L_nb_elem]; S_cols = tmp; L_nb_elem = n - L_nb_elem; for ( l = 0; l < L_nb_elem; l++) { int U_row = row_perms[invr_col_perm[L_cols[l]]]; int *U_cols = S->CSR[U_row].col; int U_nb_elem = S->CSR[U_row].nb_elem; for ( k = 0; k < U_nb_elem; k++) { int col = U_cols[k]; int indice = schur_row_struct[col] - base; if (invr_col_perm[col] != ParSHUM_UNUSED_PIVOT) { continue; } if ( indice < 0 ) { S_cols[S_nb_elem] = col; schur_row_struct[col] = base + S_nb_elem++; } } } CSR->nb_free += CSR->nb_elem; CSR->nb_elem = 0; needed_size = CSR->nb_free; if (needed_size < S_nb_elem) { while( needed_size < S_nb_elem ) needed_size *= 2; ParSHUM_CSR_alloc(S->internal_mem, CSR, needed_size, S->alignment); } memcpy(CSR->col, S_cols, S_nb_elem * sizeof(*S_cols)); CSR->nb_free -= S_nb_elem; CSR->nb_elem += S_nb_elem; int new = base + CSR->nb_elem; if (new < base ) { base = 1; bzero((void *) schur_row_struct, (size_t) S->n * sizeof(*schur_row_struct)); } else { base = new; } S->base[me] = base; } #pragma omp for schedule(guided, 10) for(int i = start; i < end; i++) { S->CSR[row_perms[i]].nb_free += S->CSR[row_perms[i]].nb_elem; S->CSR[row_perms[i]].nb_elem = 0; } } S->nnz += S_new_nnz; U->nnz += U_new_nnz; } ParSHUM_dense_matrix ParSHUM_schur_matrix_convert(ParSHUM_schur_matrix S, int done_pivots, int *col_perm, int *invr_col_perm, int *row_perm, 
int *invr_row_perm) { ParSHUM_dense_matrix self; int col, k, i; int n = S->n; int m = S->m; int n_schur = n - done_pivots; int m_schur = m - done_pivots; self = ParSHUM_dense_matrix_create(n_schur, m_schur); for(i = 0, k = done_pivots ; i < m && k < m; i++) if (invr_row_perm[i] == ParSHUM_UNUSED_PIVOT ) { row_perm[k] = i; invr_row_perm[i] = k++; } if (k != m) ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "the conversion to dense matrix has failed"); for(i = 0, k = done_pivots ; i < n && k < n; i++) if (invr_col_perm[i] == ParSHUM_UNUSED_PIVOT ) { col_perm[k] = i; invr_col_perm[i] = k++; } if (k != n) ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "the conversion to dense matrix has failed"); #pragma omp parallel for private(col, i) shared(S, invr_row_perm, col_perm) firstprivate(done_pivots, n, m_schur) //proc_bind(spread) for(col = done_pivots; col < n; col++){ CSC_struct *CSC = &S->CSC[col_perm[col]]; int local_row = (col - done_pivots) * m_schur; int nb_elem = CSC->nb_elem; double *CSC_vals = CSC->val; int *CSC_rows = CSC->row; for( i=0; i < nb_elem; i++) self->val[local_row + invr_row_perm[CSC_rows[i]] - done_pivots] = CSC_vals[i]; } return self; } void ParSHUM_schur_matrix_print(ParSHUM_schur_matrix self, char *mess) { int n = self->n; int m = self->m; printf("%s\n", mess); printf("PRINTING THE CSC PART\n"); for(int i = 0; i < n; i++) { CSC_struct *CSC = &self->CSC[i]; int *rows = CSC->row; double *vals = CSC->val; int nb_elem = CSC->nb_elem; printf("================%d======================\n", i); printf("Colum's max is %f\n", CSC->col_max); for(int j = 0; j < nb_elem; j++) printf("%d:(%e) ", rows[j], vals[j]); printf("\n"); } printf("\n\nPRINTING THE CSR PART\n"); for(int i = 0; i < m; i++) { CSR_struct *CSR = &self->CSR[i]; int *cols = CSR->col; int nb_elem = CSR->nb_elem; printf("================%d======================\n", i); for(int j = 0; j < nb_elem; j++) printf("(%d) ", cols[j]); printf("\n"); } printf("\n"); } void 
ParSHUM_schur_matrix_destroy(ParSHUM_schur_matrix self)
{
  int i;

  /* Structural arrays; the entry storage they point into lives in
     internal_mem, which is released just below. */
  free(self->CSC);
  free(self->CSR);
  ParSHUM_internal_mem_destroy(self->internal_mem);

  /* Per-thread scratch structures used during the Schur update. */
  for( i = 0; i < self->nb_threads; i++)
    free(self->data_struct[i]);
  free(self->data_struct);
  free(self->base);

  /* OpenMP locks must be destroyed before their backing storage is freed. */
  for( i = 0; i < self->m; i++)
    omp_destroy_lock(&self->row_locks[i]);
  free(self->row_locks);
  for( i = 0; i < self->n; i++)
    omp_destroy_lock(&self->col_locks[i]);
  free(self->col_locks);

  free(self);
}

/* ********************************************************************************************* */
/* ********************************************************************************************* */
/*                                            DEBUG                                              */
/* ********************************************************************************************* */
/* ********************************************************************************************* */
/* ********************************************************************************************* */

/* Debug helper: warns about any entry that appears more than once in a
   column of the CSC part or in a row of the CSR part. */
void
ParSHUM_schur_check_doubles(ParSHUM_schur_matrix self)
{
  int col, row, i, j, n = self->n, m = self->m;
  char mess[2048];

  /* Scan every column for duplicated row indices (quadratic; debug only). */
  for(col = 0; col < n; col++)
    {
      CSC_struct *CSC = &self->CSC[col];
      int *rows   = CSC->row;
      int nb_elem = CSC->nb_elem;

      for (i = 0; i < nb_elem; i++)
	{
	  row = rows[i];
	  for(j = i + 1; j < nb_elem; j++)
	    if (rows[j] == row)
	      {
		snprintf(mess, 2048, "in column %d, row %d is ducplicated on positions %d and %d",
			 col, row, i, j);
		ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess);
	      }
	}
    }

  /* Scan every row for duplicated column indices. */
  for(row = 0; row < m; row++)
    {
      CSR_struct *CSR = &self->CSR[row];
      int *cols   = CSR->col;
      int nb_elem = CSR->nb_elem;

      for (i = 0; i < nb_elem; i++)
	{
	  int col = cols[i];
	  for(j = i + 1; j < nb_elem; j++)
	    if (cols[j] == col)
	      {
		snprintf(mess, 2048, "in row %d, col %d is ducplicated on positions %d and %d",
			 row, col, i, j);
		ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess);
	      }
	}
    }
}

/* Debug helper: checks that pivot rows/columns have been emptied from S and
   that no remaining entry refers to an already pivoted row or column. */
void
ParSHUM_schur_matrix_check_pivots(ParSHUM_schur_matrix self,
				  int *row_perms, int *col_perms,
				  int *invr_row_perms, int *invr_col_perms,
int nb_pivots) { int i, j, n = self->n, m = self->m; char mess[2048]; check_vlaid_perms(col_perms, invr_col_perms, n, nb_pivots, "col"); check_vlaid_perms(row_perms, invr_row_perms, m, nb_pivots, "row"); for( i = 0; i < nb_pivots; i++) { int row = row_perms[i], col = col_perms[i]; if ( self->CSC[col].nb_elem ) { snprintf(mess, 2048, "column %d is a pivot, but not empty in S with nb_elem %d and nb_free %d", col, self->CSC[col].nb_elem, self->CSC[col].nb_free); ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess); } if ( self->CSR[row].nb_elem ) { snprintf(mess, 2048, "row %d is a pivot, but not empty in S with nb_elem %d and nb_free %d", row, self->CSR[row].nb_elem, self->CSR[row].nb_free); ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess); } } for ( i = 0; i < n; i++) { CSC_struct *CSC = &self->CSC[i]; int nb_elem = CSC->nb_elem; int *rows = CSC->row; for ( j = 0; j < nb_elem; j++) { int row = rows[j]; if ( invr_row_perms[row] != ParSHUM_UNUSED_PIVOT ) { snprintf(mess, 2048, "in col %d, %d is present, but %d is a row pivot\n", i, row, row); ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess); } } } for ( i = 0; i < m; i++) { CSR_struct *CSR = &self->CSR[i]; int nb_elem = CSR->nb_elem; int *cols = CSR->col; for ( j = 0; j < nb_elem; j++) { int col = cols[j]; if ( invr_col_perms[col] != ParSHUM_UNUSED_PIVOT ) { snprintf(mess, 2048, "in row %d, %d is present, but %d is a col pivot\n", i, col, col); ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess); } } } } void ParSHUM_schur_matrix_memory_check(ParSHUM_schur_matrix self) { int i, n = self->n, m = self->m; char mess[2048]; for ( i = 0; i < n; i++) if (self->CSC[i].nb_free < 0 ) { snprintf(mess, 2048, "error on the column %d with nb_free %d\n", i, self->CSC[i].nb_free); ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess); } for ( i = 0; i < m; i++) if (self->CSR[i].nb_free < 0 ) { snprintf(mess, 2048, "error on the row %d with nb_free %d\n", i, self->CSR[i].nb_free); ParSHUM_warning(__FUNCTION__, 
__FILE__, __LINE__, mess); } /* TODO: rewrtie this */ /* for(i = 0; i < n; i++) */ /* { */ /* CSC_struct *CSC = &self->CSC[i]; */ /* free_space unused_CSC = self->unused_CSC; */ /* long CSC_begin = CSC->offset, CSC_end = CSC->offset + CSC->nb_elem + CSC->nb_free; */ /* int current_unused = 0, j; */ /* if (CSC_begin == CSC_end) */ /* continue; */ /* while(unused_CSC) */ /* { */ /* long free_begin = unused_CSC->offset, free_end = unused_CSC->offset + unused_CSC->nb_elem; */ /* int print = 0; */ /* ParSHUM_overlaps overlaped = check_overalping_regions(free_begin, free_end, CSC_begin, CSC_end); */ /* switch (overlaped) { */ /* case (ParSHUM_overlap_none) : */ /* break; */ /* case (ParSHUM_overlap_begin) : */ /* snprintf(mess, 2048, "The %d^th free space and the %d^th column are overlaping in the begining of the col (col start %ld end %ld, free starts on %ld ends on %ld).", */ /* current_unused, i, CSC_begin, CSC_end, free_begin, free_end); */ /* print = 1; */ /* break; */ /* case (ParSHUM_overlap_end) : */ /* snprintf(mess, 2048, "The %d^th free space and the %d^th column are overlaping in the end of the col (col start %ld end %ld, free starts on %ld ends on %ld).", */ /* current_unused, i, CSC_begin, CSC_end, free_begin, free_end); */ /* print = 1; */ /* break; */ /* case (ParSHUM_overlap_total) : */ /* snprintf(mess, 2048, "The %d^th free space and the %d^th column are overlapping (col starts at %ld and ends on %ld; free starts on %ld and ends on %ld).", */ /* current_unused, i, CSC_begin, CSC_end, free_begin, free_end); */ /* print = 1; */ /* break; */ /* default: */ /* break; */ /* } */ /* if (print) */ /* ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess); */ /* current_unused++; */ /* unused_CSC = unused_CSC->next; */ /* } */ /* for(j = 0; j < n; j++) */ /* { */ /* if ( i == j) */ /* continue; */ /* CSC_struct *current_CSC = &self->CSC[j]; */ /* long current_CSC_begin = current_CSC->offset, current_CSC_end = current_CSC->offset + current_CSC->nb_elem + 
current_CSC->nb_free; */ /* int print = 0; */ /* if( current_CSC_begin == current_CSC_end) */ /* continue; */ /* ParSHUM_overlaps overlaped = check_overalping_regions(current_CSC_begin, current_CSC_end, CSC_begin, CSC_end); */ /* switch (overlaped) { */ /* case (ParSHUM_overlap_none) : */ /* break; */ /* case (ParSHUM_overlap_begin) : */ /* snprintf(mess, 2048, "The %d^th column and the %d^th column are overlaping in the begining of the col (col start %ld end %ld; col starts %ld ends on %ld).", */ /* j, i, current_CSC_begin , current_CSC_end, CSC_begin, CSC_end); */ /* print = 1; */ /* break; */ /* case (TP_overlap_end) : */ /* snprintf(mess, 2048, "The %d^th column and the %d^th column are overlaping in the end of the col (col start %ld end %ld; col starts %ld ends on %ld).", */ /* j, i, current_CSC_end, current_CSC_end, CSC_begin, CSC_end); */ /* print = 1; */ /* break; */ /* case (TP_overlap_total) : */ /* snprintf(mess, 2048, "The %d^th column and the %d^th column are overlapping (col starts at %ld and ends on %ld; col starts %ld ends on %ld).", */ /* j, i, current_CSC_end, current_CSC_end, CSC_begin, CSC_end); */ /* print = 1; */ /* break; */ /* default: */ /* break; */ /* } */ /* if (print) */ /* ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess); */ /* } */ /* } */ /* for(i = 0; i < m; i++) */ /* { */ /* CSR_struct *CSR = &self->CSR[i]; */ /* free_space unused_CSR = self->unused_CSR; */ /* long CSR_begin = CSR->offset, CSR_end = CSR->offset + CSR->nb_elem + CSR->nb_free; */ /* int current_unused = 0, j; */ /* if (CSR_begin == CSR_end) */ /* continue; */ /* while(unused_CSR) */ /* { */ /* long free_begin = unused_CSR->offset, free_end = unused_CSR->offset + unused_CSR->nb_elem; */ /* int print = 0; */ /* ParSHUM_overlaps overlaped = check_overalping_regions(free_begin, free_end, CSR_begin, CSR_end); */ /* switch (overlaped) { */ /* case (ParSHUM_overlap_none) : */ /* break; */ /* case (ParSHUM_overlap_begin) : */ /* snprintf(mess, 2048, "The %d^th free 
space and the %d^th row are overlaping in the begining of the col (row start %ld end %ld, free starts on %ld ends on %ld).", */ /* current_unused, i, CSR_begin, CSR_end, free_begin, free_end); */ /* print = 1; */ /* break; */ /* case (ParSHUM_overlap_end) : */ /* snprintf(mess, 2048, "The %d^th free space and the %d^th row are overlaping in the end of the col (row start %ld end %ld, free starts on %ld ends on %ld).", */ /* current_unused, i, CSR_begin, CSR_end, free_begin, free_end); */ /* print = 1; */ /* break; */ /* case (ParSHUM_overlap_total) : */ /* snprintf(mess, 2048, "The %d^th free space and the %d^th column are overlapping (col starts at %ld and ends on %ld; free starts on %ld and ends on %ld).", */ /* current_unused, i, CSR_begin, CSR_end, free_begin, free_end); */ /* print = 1; */ /* break; */ /* default: */ /* break; */ /* } */ /* if (print) */ /* ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess); */ /* current_unused++; */ /* unused_CSR = unused_CSR->next; */ /* } */ /* for(j = 0; j < m; j++) */ /* { */ /* if ( i == j) */ /* continue; */ /* CSR_struct *current_CSR = &self->CSR[j]; */ /* long current_CSR_begin = current_CSR->offset, current_CSR_end = current_CSR->offset + current_CSR->nb_elem + current_CSR->nb_free; */ /* int print = 0; */ /* if( current_CSR_begin == current_CSR_end) */ /* continue; */ /* ParSHUM_overlaps overlaped = check_overalping_regions(current_CSR_begin, current_CSR_end, CSR_begin, CSR_end); */ /* switch (overlaped) { */ /* case (ParSHUM_overlap_none) : */ /* break; */ /* case (ParSHUM_overlap_begin) : */ /* snprintf(mess, 2048, "The %d^th row and the %d^th column are overlaping in the begining of the row (row start %ld end %ld; row starts %ld ends on %ld).", */ /* j, i, current_CSR_begin , current_CSR_end, CSR_begin, CSR_end); */ /* print = 1; */ /* break; */ /* case (ParSHUM_overlap_end) : */ /* snprintf(mess, 2048, "The %d^th row and the %d^th row are overlaping in the end of the row (row start %ld end %ld; row starts 
%ld ends on %ld).", */ /* j, i, current_CSR_end, current_CSR_end, CSR_begin, CSR_end); */ /* print = 1; */ /* break; */ /* case (ParSHUM_overlap_total) : */ /* snprintf(mess, 2048, "The %d^th column and the %d^th column are overlapping (col starts at %ld and ends on %ld; col starts %ld ends on %ld).", */ /* j, i, current_CSR_end, current_CSR_end, CSR_begin, CSR_end); */ /* print = 1; */ /* break; */ /* default: */ /* break; */ /* } */ /* if (print) */ /* ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess); */ /* } */ /* } */ } void ParSHUM_schur_matrix_check_symetry(ParSHUM_schur_matrix self) { long CSC_nnz = 0, CSR_nnz = 0; int i, j, n = self->n, m = self->m, row, col; char mess[2048]; for(i = 0; i < n; i++) CSC_nnz += self->CSC[i].nb_elem; for(i = 0; i < m; i++) CSR_nnz += self->CSR[i].nb_elem; if (CSC_nnz != CSR_nnz) { snprintf(mess, 2048, "CSR and CSC nnz are not the same, CSC = %ld and CSR = %ld", CSC_nnz, CSR_nnz); ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess); } if (CSC_nnz != self->nnz) { snprintf(mess, 2048, "CSC and S nnz are not the same, CSC = %ld and S_nnz = %ld", CSC_nnz, self->nnz); ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess); } if (CSR_nnz != self->nnz) { snprintf(mess, 2048, "CSR and S nnz are not the same, CSR = %ld and S_nnz = %ld", CSR_nnz, self->nnz); ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess); } for(col = 0; col < n; col++) { CSC_struct *CSC = &self->CSC[col]; int *rows = CSC->row; int col_nb_elem = CSC->nb_elem; for(i = 0; i < col_nb_elem; i++) { row = rows[i]; CSR_struct *CSR = &self->CSR[row]; int row_nb_elem = CSR->nb_elem; int *cols = CSR->col; int found = 0; for(j = 0; j < row_nb_elem; j++) { if(cols[j] == col) { found = 1; break; } } if (!found) { snprintf(mess, 2048, "In CSC in col %d row %d exists, but in CSR in row %d, col %d does not exist", col, row, row, col); ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess); } } /* for I */ } for(row = 0; row < m; row++) { CSR_struct *CSR = 
&self->CSR[row]; int *cols = CSR->col; int row_nb_elem = CSR->nb_elem; for(i = 0; i < row_nb_elem; i++) { col = cols[i]; CSC_struct *CSC = &self->CSC[col]; int col_nb_elem = CSC->nb_elem; int *rows = CSC->row; int found = 0; for(j = 0; j < col_nb_elem; j++) { if(rows[j] == row) { found = 1; break; } } if (!found) { snprintf(mess, 2048, "In CSR in row %d col %d exists, but in CSC in col %d, row %d does not exist", row, col, col, row); ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess); } } /* for I */ } } /* TODO: addapt this to the new thing */ void ParSHUM_print_GB(ParSHUM_schur_matrix self, char *mess) { /* free_space CSC = self->unused_CSC; */ /* free_space CSR = self->unused_CSR; */ /* fprintf(stdout,"%s\n", mess); */ /* if (CSC->previous) */ /* ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "the first CSC free memory has a predecessor"); */ /* if (CSR->previous) */ /* ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "the first CSR free memory has a predecessor"); */ /* fprintf(stdout, "|================||=================|\n"); */ /* fprintf(stdout, "| CSC || CSR |\n"); */ /* fprintf(stdout, "|================||=================|\n"); */ /* fprintf(stdout, "| address || address |\n"); */ /* fprintf(stdout, "| nb_elem || nb_elem |\n"); */ /* fprintf(stdout, "| offset || offset |\n"); */ /* fprintf(stdout, "|================||=================|\n\n"); */ /* fprintf(stdout, "|===================================|\n"); */ /* while( CSC || CSR) { */ /* if (CSC) */ /* fprintf(stdout, "| %14p |", &CSC); */ /* else */ /* fprintf(stdout, " "); */ /* if (CSR) */ /* fprintf(stdout, "| %14p |\n", &CSR); */ /* else */ /* fprintf(stdout, "\n"); */ /* if (CSC) */ /* fprintf(stdout, "| %11ld |", CSC->nb_elem); */ /* else */ /* fprintf(stdout, " "); */ /* if (CSR) */ /* fprintf(stdout, "| %11ld |\n", CSR->nb_elem); */ /* else */ /* fprintf(stdout, "\n"); */ /* if (CSC) */ /* fprintf(stdout, "| %11ld |", CSC->offset); */ /* else */ /* fprintf(stdout, " "); */ /* 
if (CSR) */ /* fprintf(stdout, "| %11ld |\n", CSR->offset); */ /* else */ /* fprintf(stdout, "\n"); */ /* fprintf(stdout, "|===================================|\n"); */ /* if (CSC) */ /* if (CSC->next) */ /* if (CSC->next->previous != CSC) */ /* ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "the CSC free memory's next cell, has a predecessor different from CSC"); */ /* if (CSR) */ /* if (CSR->next) */ /* if (CSR->next->previous != CSR) */ /* ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "the CSR free memory's next cell, has a predecessor different from CSR"); */ /* if (CSC) */ /* CSC=CSC->next; */ /* if (CSR) */ /* CSR=CSR->next; */ /* } */ } /* TODO: addapt this to the new thing */ /* void */ /* ParSHUM_print_single_GB(free_space self, char *mess) */ /* { */ /* fprintf(stdout,"%s\n", mess); */ /* if (self->previous) */ /* ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "the first CSR free memory has a predecessor"); */ /* fprintf(stdout, "|================|\n"); */ /* fprintf(stdout, "| address |\n"); */ /* fprintf(stdout, "| nb_elem |\n"); */ /* fprintf(stdout, "| offset |\n"); */ /* fprintf(stdout, "|================|\n\n"); */ /* fprintf(stdout, "|================|\n"); */ /* while(self) */ /* { */ /* fprintf(stdout, "| %14p |\n", &self); */ /* fprintf(stdout, "| %11ld |\n", self->nb_elem); */ /* fprintf(stdout, "| %11ld |\n", self->offset); */ /* fprintf(stdout, "|================|\n"); */ /* if (self->next) */ /* if (self->next->previous != self) */ /* ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "the free memory's next cell, has a predecessor different from CSC"); */ /* self = self->next; */ /* } */ /* } */ void ParSHUM_check_current_counters(ParSHUM_schur_matrix self, int *col_perm, int *row_perm, int nb_perms, int *col_count, int *row_count, int base) { int pivot, i, n = self->n, m = self->m; int *_col_count = calloc( n, sizeof(*_col_count)); int *_row_count = calloc( m, sizeof(*_row_count)); char mess[2048]; for( pivot = 0; 
pivot < nb_perms; pivot++)
    {
      /* Recount from scratch: for every row, how many selected pivot columns
	 contain it, and for every column, how many selected pivot rows do. */
      int col = col_perm[pivot];
      CSC_struct *CSC = &self->CSC[col];
      int *rows = CSC->row;
      int nb_elem = CSC->nb_elem;
      for( i = 0; i < nb_elem; i++)
	_row_count[rows[i]]++;

      int row = row_perm[pivot];
      CSR_struct *CSR = &self->CSR[row];
      int *cols = CSR->col;
      nb_elem = CSR->nb_elem;
      for( i = 0; i < nb_elem; i++)
	_col_count[cols[i]]++;
    }

  /* Each pivot row/column must have been counted exactly once, and the
     caller-maintained counters must sit exactly at `base`. */
  for( pivot = 0; pivot < nb_perms; pivot++)
    {
      int col = col_perm[pivot];
      int row = row_perm[pivot];

      if ( _row_count[row] != 1)
	{
	  /* NOTE(review): `_row_count[i]` reads the stale loop index `i`;
	     `_row_count[row]` was presumably intended -- confirm. */
	  snprintf(mess, 2048, "calculated row_count[%d] = %d, but %d is a pivot",
		   row, _row_count[i], row);
	  ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess);
	}
      if ( row_count[row] != base)
	{
	  /* NOTE(review): `row_count[i]` likely meant `row_count[row]`. */
	  snprintf(mess, 2048, "row_count[%d] = %d, base = %d, but %d is a pivot",
		   row, row_count[i], base, row);
	  ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess);
	}
      if ( _col_count[col] != 1)
	{
	  /* NOTE(review): `_col_count[i]` likely meant `_col_count[col]`, and
	     the trailing `row` argument likely meant `col` -- confirm. */
	  snprintf(mess, 2048, "calculated col_count[%d] = %d, but %d is a pivot",
		   col, _col_count[i], row);
	  ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess);
	}
      if ( row_count[row] != base)
	{
	  /* NOTE(review): this condition re-tests `row_count[row]` while the
	     message reports col_count; `col_count[col] != base` (with
	     `col_count[col]` in the message) was presumably intended. */
	  snprintf(mess, 2048, "col_count[%d] = %d, base = %d, but %d is a pivot",
		   col, col_count[i], base, col);
	  ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess);
	}
    }

  /* Cross-check the incremental counters against the freshly computed ones.
     NOTE(review): both checks iterate up to n, although the row counters are
     sized by m; this silently assumes a square matrix (n == m) -- confirm. */
  for( i = 0; i < n; i++)
    {
      if (col_count[i] >= base)
	if ( ( col_count[i] - base + 1 ) != _col_count[i])
	  {
	    snprintf(mess, 2048, "error on col counter %d : col_count(%d) base(%d) and calculated col_count(%d)",
		     i, col_count[i], base, _col_count[i]);
	    ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess);
	  }
      if (row_count[i] >= base)
	if ( ( row_count[i] - base + 1 ) != _row_count[i])
	  {
	    snprintf(mess, 2048, "error on row counter %d : row_count(%d) base(%d) and calculated row_count(%d)",
		     i, row_count[i], base, _row_count[i]);
	    ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess);
	  }
    }

  free(_col_count);
  free(_row_count);
}
scalability.c
/**
 * \file
 * \brief libbomp test.
 */

/*
 * Copyright (c) 2007, 2008, 2009, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>

#ifdef POSIX
/* Read the x86 time-stamp counter (POSIX builds only; other builds are
   expected to provide rdtsc elsewhere). */
static inline uint64_t rdtsc(void)
{
    uint32_t lo, hi;
    __asm volatile ("rdtsc" : "=a" (lo), "=d" (hi));
    return ((uint64_t)hi << 32) | lo;
}
#endif

/* Number of array elements touched by the measured loop. */
#define N 10000000

int main(int argc, char *argv[])
{
    static int a[N];
    uint64_t t_start, t_stop;
    int idx;

    /* Exactly one argument: the number of OpenMP threads to use. */
    assert(argc == 2);

#ifndef POSIX
    bomp_bomp_init(atoi(argv[1]));
#endif
    omp_set_num_threads(atoi(argv[1]));

    /* Sequential warm-up pass over the array. */
    for (idx = 0; idx < N; idx++)
        a[idx] = 2 * idx;

    /* Timed parallel pass over the same array. */
    t_start = rdtsc();
#pragma omp parallel for
    for (idx = 0; idx < N; idx++)
        a[idx] = 2 * idx;
    t_stop = rdtsc();

    printf("Value of sum is %d, time taken %lu\n", 0, t_stop - t_start);
}
GB_unop__identity_fp64_uint32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fp64_uint32)
// op(A') function:  GB (_unop_tran__identity_fp64_uint32)

// C type:   double
// A type:   uint32_t
// cast:     double cij = (double) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    double z = (double) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint32_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = (double) aij ;       \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fp64_uint32)
(
    double *Cx,                 // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // Ab == NULL: no bitmap, so all anz entries are processed
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t aij = Ax [p] ;
            double z = (double) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries whose bitmap flag is not set
            if (!Ab [p]) continue ;
            uint32_t aij = Ax [p] ;
            double z = (double) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fp64_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared via textual inclusion; it uses the
    // GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
launch.h
#pragma once

#include <atomic>

/// CUDA-style kernel launch configuration.
typedef struct {
  int gridSize;  // Number of thread blocks (OpenMP teams) per grid
  int blockSize; // Number of threads per thread block
  int smemSize;  // Shared Memory Size (not consumed by launch() below)
  int stream;    // associated stream (not consumed by launch() below)
} launchConfig;

// NOTE(review): `kernels` is never allocated; `&kernels[i]` serves purely as a
// unique per-kernel dependency address for the `depend` clauses and is never
// dereferenced.  All three variables are `static`, so every translation unit
// including this header gets its own independent counters -- confirm the
// header is only used from a single TU, or make these `inline` variables.
static int *kernels = nullptr;
static std::atomic<unsigned long> num_kernels = {0};    // kernels launched so far
static std::atomic<unsigned long> synced_kernels = {0}; // kernels already awaited

/// Kernel launch: offloads `kernel` as an asynchronous (`nowait`) target-teams
/// region and records an `out` dependence on this kernel's slot so that
/// synchronize() can later wait for its completion.
template <typename Ty, typename Func, Func kernel, typename... Args>
void launch(const launchConfig &config, Ty *ptrA, Ty *ptrB, Args... args) {
  const int kernel_no = num_kernels++;
#pragma omp target teams is_device_ptr(ptrA, ptrB) num_teams(config.gridSize) \
    thread_limit(config.blockSize) depend(out                                 \
                                          : kernels[kernel_no]) nowait
  {
#pragma omp parallel
    { kernel(ptrA, ptrB, args...); }
  }
}

/// Device Synchronization: blocks until every kernel launched before this call
/// has completed, by issuing undeferred tasks that depend (`in`) on each
/// outstanding kernel's slot.
/// Marked `inline` so including this header from several translation units
/// does not produce multiple definitions (the original non-inline definition
/// in a header violated the one-definition rule).
inline void synchronize() {
  unsigned long kernel_first = synced_kernels;
  unsigned long kernel_last = num_kernels;
  if (kernel_first < kernel_last) {
    for (unsigned long i = kernel_first; i < kernel_last; ++i) {
      // `if(0)` makes the task undeferred: the encountering thread waits here
      // until the dependence on kernels[i] is satisfied.
#pragma omp task if (0) depend(in : kernels[i])
      {
      }
    }
    // Publish the new watermark; if a concurrent synchronize() already
    // advanced it, leave that (equal or larger) value in place.
    synced_kernels.compare_exchange_strong(kernel_first, kernel_last);
  }
}
Process.h
#ifndef PROCESS_H_
#define PROCESS_H_

/* =========================================================================
   Copyright (c) 2008-2015, Institute for Microelectronics, TU Wien.

   -----------------
   ViennaTS - The Vienna Topography Simulator
   -----------------

   Contact:         viennats@iue.tuwien.ac.at

   License:         MIT (X11), see file LICENSE in the base directory
============================================================================= */

#include "Time.h"
#include "calc.h"

#include <vector>
#include <fstream>
#include <string>
#include <list>
#include <algorithm>
#include <iostream>

#define BOOST_NO_HASH

#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/connected_components.hpp>

#include "message.h"

#include "Partition/PartitionNeighborLinksArrays.h"
#include "Partition/PartitionUpDownLinkTree.h"
#include "Partition/PartitionFullGrid.h"

#include "./LSlib/vector.hpp"
#include "boundaries.h"

///Process related objects and methods.
namespace proc {

    /// Appends `num_layers` copies of the top-most level set to `LS`, giving
    /// each copy a fresh levelset id; for negative `num_layers` the second
    /// loop is meant to remove layers instead.
    /// NOTE(review): `assert` is used below without a direct <cassert>
    /// include -- presumably pulled in transitively; confirm.
    template <class LevelSetType> void AddLayer(std::list<LevelSetType>& LS, int num_layers) {
        for (int i=0;i<num_layers;++i) {
            LS.push_back(LS.back());
            LS.back().set_levelset_id();
        }

        for (int i=0;i>num_layers;--i) {
            assert(LS.size()>=2);
            // NOTE(review): `(LS.end()--)--` post-decrements temporaries, so
            // the expression still evaluates to LS.end(); erasing end() is
            // undefined behavior.  `LS.erase(--LS.end())` (erase the last
            // layer) was presumably intended -- confirm before relying on the
            // removal path.
            LS.erase((LS.end()--)--);
        }
    }

    /// Determines the material of every active point of the top-most level
    /// set (see the in-body comment) and stores it in `PointMaterials`.
    template <class LevelSetsType> void DetermineTopMostLayer(
            const LevelSetsType& LS,
            std::vector<unsigned int>& PointMaterials) {

        //this function determines the materials of the most top levelset
        typedef typename LevelSetsType::value_type LevelSetType;

        PointMaterials.clear();
        PointMaterials.resize(LS.back().num_active_pts());

        typename LevelSetType::points_type segmentation=LS.back().get_new_segmentation();

        #pragma omp for schedule(static, 1) // parallelization - Iterations divided into chunks of size 1.
Each chunk is assigned to a thread for (int p=0;p<= static_cast<int>(segmentation.size());++p) { typename LevelSetType::point_type begin_v=(p==0)?LS.back().grid().min_point_index():segmentation[p-1]; typename LevelSetType::point_type end_v=(p!=static_cast<int>(segmentation.size()))?segmentation[p]:LS.back().grid().increment_indices(LS.back().grid().max_point_index()); //iterator necessary to access std::vector< typename LevelSetType::const_iterator_runs> ITs; for (typename LevelSetsType::const_iterator it=LS.begin();&(*it)!=&(LS.back());++it) ITs.push_back(typename LevelSetType::const_iterator_runs(*it,begin_v)); for (typename LevelSetType::const_iterator_runs it(LS.back(),begin_v );it.start_indices()<end_v;it.next()) { if (!it.is_active()) continue; const typename LevelSetType::value_type d=it.value2(); int z=LS.size()-1; for (;z>0;z--) { ITs[z-1].go_to_indices_sequential(it.start_indices()); if (d<ITs[z-1].value()) break; } PointMaterials[it.active_pt_id2()]=LS.size()-1-z; } } } namespace { template <class I1, class I2> bool connected(const I1& it1, const I2& it2) { return (it1.sign()==it2.sign()); } } template <class LStype> std::pair<unsigned int, unsigned int> CalculateConnectivities( const LStype& l, std::vector<bool>& Connectivities, bool is_open_boundary_negative) { const int D=LStype::dimensions; boost::adjacency_list <boost::setS, boost::vecS, boost::undirectedS> Graph; unsigned int num_components=0; //unsigned int total_number_of_runs=0; //allocate memory for component list // std::vector<int> comp_lst[l.number_of_segments()][D+1]; // std::vector<int> *comp_lst = new std::vector<int> [l.number_of_segments()][D+1]; std::vector<int>** comp_lst = new std::vector<int>* [l.number_of_segments()]; for (unsigned int i=0;i<l.number_of_segments();++i) { comp_lst[i] = new std::vector<int>[D+1]; } for (unsigned int sub=0;sub<l.number_of_segments();++sub) { for (int i = -1;i<D;++i) { comp_lst[sub][i+1].resize(l.number_of_runs(i,sub),-1); 
//total_number_of_runs+=l.number_of_runs(i,sub); } } bool is_first_run=true; int node_of_first_run=0; int node_of_last_run=0; //cycle through for (typename LStype::template const_iterator_neighbor_filtered<typename LStype::filter_all,1> it(l);!it.is_finished();it.next()) { int & tc = comp_lst[it.center().get_segment_num()][it.center().get_level()][it.center().run_type_position()]; if (tc==-1) { for (int k=0;k<2*D;++k) { const int & tn= comp_lst[it.neighbor(k).get_segment_num()][it.neighbor(k).get_level()][it.neighbor(k).run_type_position()]; if (tn!=-1) { if (connected(it.center(),it.neighbor(k))) { tc=tn; break; } } } } if (tc==-1) { tc=num_components; boost::add_vertex(Graph); ++num_components; } for (int k=0;k<2*D;++k) { int & tn= comp_lst[it.neighbor(k).get_segment_num()][it.neighbor(k).get_level()][it.neighbor(k).run_type_position()]; if (connected(it.center(),it.neighbor(k))) { if (tn!=-1) { if (tc!=tn) boost::add_edge(tc,tn,Graph); } else { tn=tc; } } } if (is_first_run) { is_first_run=false; node_of_first_run=tc; } node_of_last_run=tc; } assert(boost::num_vertices(Graph)==num_components); std::vector<int> component(boost::num_vertices(Graph)); unsigned int num_components_after = connected_components(Graph, &component[0]); //determine component number of source region int source_node=(is_open_boundary_negative)?component[node_of_first_run]:component[node_of_last_run]; Connectivities.clear(); for (typename LStype::template const_iterator_neighbor_filtered<typename LStype::filter_active,1> it(l);!it.is_finished();it.next()) { if (it.center().sign()==lvlset::POS_SIGN) { assert(it.center().get_level()==0); assert(it.center().get_segment_num()<l.number_of_segments()); Connectivities.push_back(component[comp_lst[it.center().get_segment_num()][0][it.center().run_type_position()]]==source_node); //TODO } else { int k; for (k=0;k<2*D;++k) { if 
(component[comp_lst[it.neighbor(k).get_segment_num()][it.neighbor(k).get_level()][it.neighbor(k).run_type_position()]]==source_node) break; } Connectivities.push_back(k!=2*D); } } for(unsigned int i=0;i<l.number_of_segments();++i) { delete[] comp_lst[i]; } delete[] comp_lst; return std::make_pair(num_components, num_components_after); } template <class LStype> void CalculateVisibilities( const LStype& l, std::vector<bool>& Visibilities, int open_boundary_direction, bool is_open_boundary_negative) { const int D=LStype::dimensions; const typename LStype::value_type max=std::numeric_limits<typename LStype::value_type>::max(); Visibilities.resize(l.num_active_pts()); std::vector<typename LStype::index_type> old_indices(D-1-open_boundary_direction, std::numeric_limits<typename LStype::index_type>::max()); unsigned int size=1; for (int i=0;i<open_boundary_direction;++i) { assert(!l.grid().is_pos_boundary_infinite(i)); assert(!l.grid().is_neg_boundary_infinite(i)); size*=(l.grid().max_point_index(i)-l.grid().min_point_index(i)+1); } std::vector<typename LStype::value_type> min_values(size, max); typename LStype::size_type id=0; typename LStype::const_iterator_runs it(l,!is_open_boundary_negative); while (!it.is_finished()) { for (int i=0;i<D-1-open_boundary_direction;++i) { bool b=false; if (old_indices[i]!=it.start_indices(i+open_boundary_direction+1)) { old_indices[i]=it.start_indices(i+open_boundary_direction+1); b=true; } if (b) min_values.assign(size,max); } unsigned int pos_begin=0; unsigned int pos_end=0; for (int i=open_boundary_direction-1;i>=0;--i) { pos_begin*=(l.grid().max_point_index(i)-l.grid().min_point_index(i)+1); pos_end*=(l.grid().max_point_index(i)-l.grid().min_point_index(i)+1); pos_begin+=(it.start_indices(i)-l.grid().min_point_index(i)); pos_end+=(it.end_indices(i)-l.grid().min_point_index(i)); } if (it.is_active()) { Visibilities[is_open_boundary_negative?id:(l.num_active_pts()-1-id)]=(it.value()<min_values.at(pos_begin)); ++id; } for (unsigned int 
i=pos_begin; i<=pos_end;++i) min_values.at(i)=std::min(min_values.at(i), it.value()); if (is_open_boundary_negative) { it.next(); } else { it.previous(); } } assert(id==l.num_active_pts()); } namespace { ///Holds information about the velocities of grid points template <class ModelType, int Dimensions> class VelocityClass { const ModelType& Model; const double* NormalVector; const double* Coverages; const double* Rates; const std::vector<bool>& Connectivities; const std::vector<bool>& Visibilities; public: VelocityClass( const ModelType& m, const double * n, const double * c, const double * r, const std::vector<bool>& co, const std::vector<bool>& vi ) : Model(m), NormalVector(n), Coverages(c), Rates(r), Connectivities(co), Visibilities(vi) {} double operator()(unsigned int active_pt,int matnum) const { double v; Model.CalculateVelocity( v, calc::Make3DVector<Dimensions>(NormalVector+active_pt*Dimensions), Coverages+active_pt*Model.CoverageStorageSize, Rates+active_pt*Model.RatesStorageSize, matnum, (Model.CalculateConnectivities)?Connectivities[active_pt]:true, (Model.CalculateVisibilities)?Visibilities[active_pt]:true ); return v; } }; ///Holds information about velocities of grid points. 
template <class ModelType, int Dimensions> class VelocityClass2 { const ModelType& Model; const double* NormalVector; const double* Coverages; const double* Rates; const std::vector<bool>& Connectivities; const std::vector<bool>& Visibilities; public: VelocityClass2( const ModelType& m, const double * n, const double * c, const double * r, const std::vector<bool>& co, const std::vector<bool>& vi ) : Model(m), NormalVector(n), Coverages(c), Rates(r), Connectivities(co), Visibilities(vi) {} void scalar_velocity(double & v, unsigned int active_pt,int matnum) const { Model.CalculateVelocity( v, calc::Make3DVector<Dimensions>(NormalVector+active_pt*Dimensions), Coverages+active_pt*Model.CoverageStorageSize, Rates+active_pt*Model.RatesStorageSize, matnum, (Model.CalculateConnectivities)?Connectivities[active_pt]:true, (Model.CalculateVisibilities)?Visibilities[active_pt]:true); } void vector_velocity(double* v, unsigned int active_pt, double location, int matnum) const { Model.CalculateVectorVelocity( v, calc::Make3DVector<Dimensions>(NormalVector+active_pt*Dimensions), Coverages+active_pt*Model.CoverageStorageSize, Rates+active_pt*Model.RatesStorageSize, matnum, (Model.CalculateConnectivities)?Connectivities[active_pt]:true, (Model.CalculateVisibilities)?Visibilities[active_pt]:true); } }; ///Holds all information about simulation in series data. 
    ///Adapter exposing per-active-point simulation data (velocity, coverages,
    ///rates, material) as numbered output "series" for the surface writers.
    ///Series layout: 0 = velocity, 1..CoverageStorageSize = coverages,
    ///then RatesStorageSize rates, and finally the material number.
    ///All pointers/references are non-owning; empty Materials/Connectivities/
    ///Visibilities vectors mean "not available" and defaults are used.
    template <class ModelType, int Dimensions> class DataAccessClass {

        const ModelType& Model;
        const double* Coverages;
        const double* Rates;
        const double* NormalVector;
        const std::vector<unsigned int>& Materials;
        const std::vector<bool>& Connectivities;
        const std::vector<bool>& Visibilities;

        // which of the series should actually be written
        bool OutputVelocities;
        bool OutputCoverages;
        bool OutputRates;
        bool OutputMaterials;

    public:

        DataAccessClass(    const ModelType& m,
                            const double * c,
                            const double * r,
                            const double * n,
                            const std::vector<unsigned int>& ma,
                            const std::vector<bool>& co,
                            const std::vector<bool>& vi,
                            bool out_v=false,
                            bool out_c=false,
                            bool out_r=false,
                            bool out_m=false
                            ) :
                            Model(m),
                            Coverages(c),
                            Rates(r),
                            NormalVector(n),
                            Materials(ma),
                            Connectivities(co),
                            Visibilities(vi),
                            OutputVelocities(out_v),
                            OutputCoverages(out_c),
                            OutputRates(out_r),
                            OutputMaterials(out_m) {}

        ///Total number of series: velocity + coverages + rates + material.
        int number_of_series() const {
            return (1+ModelType::CoverageStorageSize+ModelType::RatesStorageSize+1);
        }

        ///Numeric value of the given series for one active point.
        template<class PT_ID_TYPE> double get_series_data_double(PT_ID_TYPE active_pt_id, int series) const {
            if (series==0) {
                // series 0: scalar velocity, evaluated with the point's
                // material/connectivity/visibility when available
                double v=0.;
                unsigned int mat=0;
                bool connected=true;
                bool visible=true;
                if (Materials.size()>0) mat= Materials[active_pt_id];
                if (Connectivities.size()>0) connected=Connectivities[active_pt_id];
                if (Visibilities.size()>0) visible=Visibilities[active_pt_id];
                Model.CalculateVelocity(
                        v,
                        calc::Make3DVector<Dimensions>(NormalVector+active_pt_id*Dimensions),
                        Coverages+active_pt_id*Model.CoverageStorageSize,
                        Rates+active_pt_id*Model.RatesStorageSize,
                        mat,
                        connected,
                        visible
                        );
                return v;
            } else if (series<=ModelType::CoverageStorageSize) {
                return Coverages[active_pt_id*ModelType::CoverageStorageSize+series-1];
            } else if (series<=ModelType::CoverageStorageSize+ModelType::RatesStorageSize) {
                return Rates[active_pt_id*ModelType::RatesStorageSize+series-ModelType::CoverageStorageSize-1];
            } else {
                // last series: material number (0 if unknown)
                unsigned int mat=0;
                if (Materials.size()>0) mat= Materials[active_pt_id];
                return mat;
            }
            return 0.;  // unreachable: every branch above returns
        }

        template <class PT_ID_TYPE>
        ///Formats the value of the given series for one active point as text
        ///(same series layout as get_series_data_double; floats are narrowed
        ///to float precision before printing).
        std::string get_series_data(PT_ID_TYPE active_pt_id, int series) const {
            std::ostringstream out;
            if (series==0) {
                double v=0.;
                unsigned int mat=0;
                bool connected=true;
                bool visible=true;
                if (Materials.size()>0) mat= Materials[active_pt_id];
                if (Connectivities.size()>0) connected=Connectivities[active_pt_id];
                if (Visibilities.size()>0) visible=Visibilities[active_pt_id];
                Model.CalculateVelocity(
                        v,
                        calc::Make3DVector<Dimensions>(NormalVector+active_pt_id*Dimensions),
                        Coverages+active_pt_id*Model.CoverageStorageSize,
                        Rates+active_pt_id*Model.RatesStorageSize,
                        mat,
                        connected,
                        visible
                        );
                out << static_cast<float>(v);
            } else if (series<=ModelType::CoverageStorageSize) {
                out << static_cast<float>(Coverages[active_pt_id*ModelType::CoverageStorageSize+series-1]);
            } else if (series<=ModelType::CoverageStorageSize+ModelType::RatesStorageSize) {
                out << static_cast<float>(Rates[active_pt_id*ModelType::RatesStorageSize+series-ModelType::CoverageStorageSize-1]);
            } else {
                unsigned int mat=0;
                if (Materials.size()>0) mat= Materials[active_pt_id];
                out << mat;
            }
            return out.str();
        }

        ///Human-readable series name: "Velocities", "CoverageN", "RateN" or "Material".
        std::string get_series_label(int series) const {
            if (series==0) {
                return std::string("Velocities");
            } else if (series<=ModelType::CoverageStorageSize) {
                std::ostringstream out;
                out << "Coverage" << series-1;
                return out.str();
            } else if (series<=ModelType::CoverageStorageSize+ModelType::RatesStorageSize) {
                std::ostringstream out;
                out << "Rate" << series-ModelType::CoverageStorageSize-1;
                return out.str();
            } else {
                return std::string("Material");
            }
        }

        ///Type tag of a series: "float" for everything except the material series ("int").
        std::string get_series_type(int series) const {
            if (series<=ModelType::CoverageStorageSize+ModelType::RatesStorageSize) {
                return std::string("float");
            } else {
                return std::string("int");
            }
        }

        ///Whether the given series was requested for output.
        bool get_series_output(int series) const {
            if (series==0) {
                return OutputVelocities;
            } else if (series<=ModelType::CoverageStorageSize) {
                return OutputCoverages;
            } else if (series<=ModelType::CoverageStorageSize+ModelType::RatesStorageSize) {
                return OutputRates;
            } else {
return OutputMaterials; } } }; } template <class LevelSetsType, class ParameterType, class ProcessParameterType , class OutputInfoType> void ExecuteProcess( LevelSetsType& LevelSets, const model::Planarization& Model, const ParameterType& Parameter, const ProcessParameterType& ProcessParameter, OutputInfoType & output_info ) { typedef typename LevelSetsType::value_type LevelSetType; LevelSets.push_back(LevelSetType(LevelSets.back().grid(), Model.get_coordinate()/Parameter.grid_delta, Parameter.open_boundary, Parameter.open_boundary_negative)); for (typename LevelSetsType::iterator it=LevelSets.begin();&(*it)!=&(LevelSets.back());++it) { it->max(LevelSets.back()); //adjust all level set functions below the plane it->prune(); //remove grid points which do not have at least one opposite signed neighbor it->segment(); } if (!Model.fill_up()) LevelSets.pop_back(); else LevelSets.back().set_levelset_id(); // we introduced new material, so it needs an ID //TODO output and time } template <class LevelSetsType, class ParameterType, class ProcessParameterType, class OutputInfoType> void ExecuteProcess( LevelSetsType& LevelSets, const model::Mask& Model, const ParameterType& Parameter, const ProcessParameterType& ProcessParameter, OutputInfoType & output_info ) { typedef typename LevelSetsType::value_type LevelSetType; const int D=LevelSetType::dimensions; geometry::geometry<D> mask_geometry; geometry::surface<D> mask_surface; LevelSetType mask_ls(LevelSets.back().grid()); if(Model.file_name().find(".lvst") != std::string::npos){ mask_ls.import_levelset(Model.file_name()); } else { if (Model.surface()) { mask_surface.ReadVTK(Model.file_name(), Parameter.input_scale, Parameter.input_transformation, Parameter.input_transformation_signs, Parameter.change_input_parity, Parameter.input_shift); } else { mask_geometry.Read(Model.file_name(),Parameter.input_scale,Parameter.input_transformation, Parameter.input_transformation_signs, Parameter.change_input_parity, 
Parameter.material_mapping, Parameter.input_shift, Parameter.ignore_materials); } // manually set min and max to match original simulation for(unsigned i=0; i<D; ++i){ if(LevelSets.back().grid().boundary_conditions(i) != lvlset::INFINITE_BOUNDARY){ mask_geometry.Min[i] = LevelSets.back().grid().min_grid_index(i)*Parameter.grid_delta; mask_geometry.Max[i] = LevelSets.back().grid().max_grid_index(i)*Parameter.grid_delta; } } // mask_geometry.Read(Model.file_name(), Parameter.input_scale, Parameter.input_transformation, Parameter.input_transformation_signs, Parameter.change_input_parity, Parameter.material_mapping, Parameter.input_shift, Parameter.ignore_materials); typedef std::list<geometry::surface<D> > SurfacesType; SurfacesType Surfaces; if (Model.surface()) { Surfaces.push_back(mask_surface); } else { std::bitset<2*D> remove_flags; for (int i=0;i<D;++i) { if (Parameter.boundary_conditions[i].min==bnc::PERIODIC_BOUNDARY || Parameter.boundary_conditions[i].min==bnc::REFLECTIVE_BOUNDARY || Parameter.boundary_conditions[i].min==bnc::EXTENDED_BOUNDARY) { remove_flags.set(i); } else if (i==Parameter.open_boundary && !Parameter.open_boundary_negative && Model.remove_bottom()) { remove_flags.set(i); } if (Parameter.boundary_conditions[i].min==bnc::PERIODIC_BOUNDARY || Parameter.boundary_conditions[i].min==bnc::REFLECTIVE_BOUNDARY || Parameter.boundary_conditions[i].min==bnc::EXTENDED_BOUNDARY) { remove_flags.set(i+D); } else if (i==Parameter.open_boundary && Parameter.open_boundary_negative && Model.remove_bottom()) { remove_flags.set(i+D); } } msg::print_start("Extract surface and interfaces..."); geometry::TransformGeometryToSurfaces(mask_geometry, Surfaces, remove_flags, Parameter.grid_delta*Parameter.snap_to_boundary_eps, Parameter.report_import_errors); msg::print_done(); } msg::print_start("Distance transformation..."); //LevelSetType mask_ls(LevelSets.back().grid()); init(mask_ls,Surfaces.back(),Parameter.report_import_errors); msg::print_done(); } // only put 
mask, where no other LS was before if(!Model.ignore_other_materials()){ mask_ls.invert(); for(auto LS=LevelSets.begin(); LS != LevelSets.end(); ++LS){ mask_ls.min(*LS); } mask_ls.invert(); } // wrap all higher levelsets around mask before pushing it to the front for(auto LS=LevelSets.begin(); LS != LevelSets.end(); ++LS){ LS->min(mask_ls); } // now put the mask as the lowest levelset LevelSets.push_front(mask_ls); LevelSets.front().set_levelset_id(); //TODO output and time } template <class LevelSetsType, class ParameterType, class ProcessParameterType, class OutputInfoType> void ExecuteProcess( LevelSetsType& LevelSets, const model::BooleanOps& Model, const ParameterType& Parameter, const ProcessParameterType& ProcessParameter, OutputInfoType & output_info ) { typedef typename LevelSetsType::value_type LevelSetType; const int D=LevelSetType::dimensions; LevelSetType* boolop_ls; if(!Model.file_name().empty()){ geometry::geometry<D> boolop_geometry; geometry::surface<D> boolop_surface;// = new geometry::surface<D>; if (Model.surface()) { boolop_surface.ReadVTK(Model.file_name(), Parameter.input_scale, Parameter.input_transformation, Parameter.input_transformation_signs, Parameter.change_input_parity, Parameter.input_shift); } else { boolop_geometry.Read(Model.file_name(),Parameter.input_scale,Parameter.input_transformation, Parameter.input_transformation_signs, Parameter.change_input_parity, Parameter.material_mapping, Parameter.input_shift, Parameter.ignore_materials); } typedef std::list<geometry::surface<D> > SurfacesType; SurfacesType Surfaces; if (Model.surface()) { Surfaces.push_back(boolop_surface); } else { std::bitset<2*D> remove_flags; for (int i=0;i<D;++i) { if (Parameter.boundary_conditions[i].min==bnc::PERIODIC_BOUNDARY || Parameter.boundary_conditions[i].min==bnc::REFLECTIVE_BOUNDARY || Parameter.boundary_conditions[i].min==bnc::EXTENDED_BOUNDARY) { remove_flags.set(i); } else if (i==Parameter.open_boundary && !Parameter.open_boundary_negative && 
Model.remove_bottom()) { remove_flags.set(i); } if (Parameter.boundary_conditions[i].min==bnc::PERIODIC_BOUNDARY || Parameter.boundary_conditions[i].min==bnc::REFLECTIVE_BOUNDARY || Parameter.boundary_conditions[i].min==bnc::EXTENDED_BOUNDARY) { remove_flags.set(i+D); } else if (i==Parameter.open_boundary && Parameter.open_boundary_negative && Model.remove_bottom()) { remove_flags.set(i+D); } } //std::cout << "transform to surface\n"; geometry::TransformGeometryToSurfaces(boolop_geometry, Surfaces, remove_flags, Parameter.grid_delta*Parameter.snap_to_boundary_eps, Parameter.report_import_errors); } LevelSetType dummy_ls(LevelSets.back().grid()); init(dummy_ls,Surfaces.back(),Parameter.report_import_errors); boolop_ls = &dummy_ls; } else if(Model.levelset()>=0){ //If internal levelset should be used typename LevelSetsType::iterator it = LevelSets.begin(); for(int i=0; i<Model.levelset(); ++i) ++it; boolop_ls = &(*it); } else{ return; } if (Model.level()>0) { if (Model.invert()) boolop_ls->invert(); int j=0; typename LevelSetsType::iterator ls_it = LevelSets.begin(); for (;j<static_cast<int>(LevelSets.size())-Model.level();++j) { ++ls_it; } while (ls_it!=LevelSets.end()) { ls_it->min(*boolop_ls); ls_it->prune(); ls_it->segment(); ++ls_it; } if (Model.invert() && Model.levelset()>=0) boolop_ls->invert(); //Invert again so that the original levelset is not changed } else if(Model.level()<0){ if (Model.invert()) boolop_ls->invert(); int j=0; typename LevelSetsType::iterator ls_it_old = LevelSets.begin(); typename LevelSetsType::iterator ls_it = LevelSets.begin(); for (;j<static_cast<int>(LevelSets.size())+Model.level();++j) { ls_it_old=ls_it; ++ls_it; } if(!Model.wrap_surface()) j=0; while (ls_it!=LevelSets.end()) { ls_it->max(*boolop_ls); if (j>0) ls_it->min(*ls_it_old); ls_it->prune(); ls_it->segment(); ++ls_it; } if (Model.invert() && Model.levelset()>=0) boolop_ls->invert(); //Invert again so that the original levelset is not changed } // remove levelset used for 
booling if specified if(Model.levelset()>=0 && Model.remove_levelset()){ auto it=LevelSets.begin(); for(int i=0; i<Model.levelset(); ++i) ++it; LevelSets.erase(it); } //Write one output if there is any output time or there is final output if(!(!ProcessParameter.output_times.empty() || ProcessParameter.final_output)) return; { std::ostringstream oss; oss << "Writing output " << output_info.output_counter; //oss << " (time = " << RelativeTime << ")..."; msg::print_start(oss.str()); } typename LevelSetsType::iterator it=LevelSets.begin(); for (unsigned int i=0;i<LevelSets.size();i++) { it->prune(); if (Parameter.print_dx) { std::ostringstream oss; oss << Parameter.output_path<< output_info.file_name <<"_" << i << "_" << output_info.output_counter << ".dx"; #ifdef VERBOSE msg::print_message("print dx"); #endif write_explicit_surface_opendx(*it,oss.str()); } if (Parameter.print_vtk) { std::ostringstream oss; oss << Parameter.output_path<< output_info.file_name <<"_" << i << "_" << output_info.output_counter << ".vtk"; #ifdef VERBOSE msg::print_message("print vtk"); #endif write_explicit_surface_vtk(*it,oss.str()); } if (Parameter.print_lvst) { std::ostringstream oss; oss << Parameter.output_path<< output_info.file_name <<"_" << i << "_" << output_info.output_counter << ".lvst"; #ifdef VERBOSE msg::print_message("print lvst"); #endif it->export_levelset(oss.str(), Parameter.bits_per_distance); } it++; } output_info.output_counter++; msg::print_done(); } //Topography simulation - execute a topography changing process according to required model and parameters template <class LevelSetsType, class ModelType, class ParameterType, class ProcessParameterType, class OutputInfoType> void ExecuteProcess( LevelSetsType& LevelSets, const ModelType& Model, const ParameterType& Parameter, const ProcessParameterType& ProcessParameter, OutputInfoType & output_info, std::vector<double>& Coverages//, // std::vector<double> Rates//, // int step_cycle ) { const int 
D=LevelSetsType::value_type::dimensions;    // continues the declaration begun in the previous chunk

    const std::vector<double> & OutputTimes=ProcessParameter.output_times;      //vector of times when output will be recorded
    std::vector<double>::const_iterator OutputTimesIter = OutputTimes.begin();  //std::lower_bound(OutputTimes.begin(), OutputTimes.end(), AbsoluteTime);

    //----------------------------------------------------------------------------------------------------------------------------------------
    //    while (LevelSets.size()>1) {
    //        LevelSets.pop_back();
    //    }
    //    typedef typename LevelSetsType::value_type LevelSetType;
    //    LevelSets.push_front(LevelSetType(LevelSets.back().grid(), 0, Parameter.open_boundary, !Parameter.open_boundary_negative));
    //----------------------------------------------------------------------------------------------------------------------------------------

    int init_cycles=ProcessParameter.StartIterationCycles;  //number of initial iteration cycles
    int rec_cycles=ProcessParameter.IterationCycles;        //number of subsequent iteration cycles

    geom::cells<ParameterType::Dimension> Cells;

    // per-active-point state; Coverages itself is passed in by the caller
    //    std::vector<double> Coverages(std::max(LevelSets.back().num_active_pts()* Model.CoverageStorageSize,1u),0.);
    std::vector<double> Rates(1,0);
    std::vector<double> NormalVectors;
    std::vector<double> DistancesToReceiver;
    std::vector<unsigned int> PointMaterials;
    std::vector<bool> Connectivities;
    std::vector<bool> Visibilities;

    //time statistics
    const std::string TimeStatFileName=Parameter.output_path+"StatisticsTimes.cvs";
    std::ofstream f;
    //unsigned int LineNumber;
    if (Parameter.print_statistics) {
        // write the CSV header only once (when the file does not exist yet)
        if(!std::ifstream(TimeStatFileName.c_str())) {
#ifdef VERBOSE
            msg::print_message("Print Header in StatisticsTimes.cvs");
#endif
            f.open(TimeStatFileName.c_str());
            f << "Time for expansion" <<";";
            f << "Time for normal vector calc." <<";";
            f << "Determining materials" <<";";
            f << "Determining connectivities" <<";";
            f << "Reduced graph num vertices" <<";";
            f << "num componenets" <<";";
            f << "Time for smoothing" <<";";
            f << "Determining visibilities" <<";";
            f << "Setup active cells" <<";";
            f << "Setup partition" <<";";
            f << "Rate calculation" <<";";
            f << "Memory Ray Tracing Data Structure"<<";";
            f << "Level set time integration" <<";";
            f << "Output" <<";";
            f << "Time for Output" <<";";
            f << "Total time step excl. Output" <<";";
            f << "Total time step incl. Output" <<";";
            // NOTE(review): the last three data columns written per row below
            // have no header entries, and no newline terminates the header
            // row while these stay commented out - confirm intent.
            //TODO f << "Chosen time step" <<";";
            //TODO f << "Time" <<";";
            //TODO f << "Left Time" <<std::endl;
            f.close();
        }
    }

    const double & ProcessTime = ProcessParameter.ProcessTime;
    double RelativeTime=0;
    //while ((OutputTimesIter!=OutputTimes.end()) && (RelativeTime>*OutputTimesIter)) ++OutputTimesIter;

#ifdef VERBOSE
    msg::print_message("Start loop over time");
#endif

    // main time loop: one pass per (possibly zero-length) time step
    while(true) {
        //  std::vector<double>& Coverages_temp = Coverages;

        double TimeTotalExclOutput=-my::time::GetTime();
        double TimeTotalInclOutput=-my::time::GetTime();
        double TimeExpansion=0;
        double TimeNormals=0;
        double TimeMaterials=0;
        double TimeCells=0;
        double TimePartition=0;
        double TimeRates=0;
        double TimeTimeIntegration=0;
        double TimeOutput=0;
        double TimeConnectivities=0;
        double TimeVisibilities=0;
        double TimeSmoothing=0;
        double ray_tracing_memory=0;
        unsigned int graph_size=0;
        unsigned int num_components=0;

        // output is due exactly when the current time hits the next output time
        bool MakeOutput=false;
        if (OutputTimesIter!=OutputTimes.end()) {
            assert(RelativeTime<=*OutputTimesIter);
            if (RelativeTime==*OutputTimesIter) {
                MakeOutput=true;
                OutputTimesIter++;
            }
        }
        //if ((RelativeTime==EndTime) && (ProcessParameter.final_output)) MakeOutput=true;
        //if ((RelativeTime==StartTime) && (ProcessParameter.initial_output)) MakeOutput=true;

        if (!MakeOutput) if (RelativeTime==ProcessTime) break;

        //###########################
        // smooth surface level set
        //###########################
        if (ProcessParameter.smoothing_material_level>0) {
#ifdef VERBOSE
            msg::print_message("smoothing");
#endif
            TimeSmoothing-=my::time::GetTime();
            double time_step;
            int dummy;
            int counter=0;
            // iterate the smoothing scheme until it reports no further change
            // (returned step == numeric max) or the iteration cap is reached
            do {
                time_step=lvlset::time_integrate(
                        LevelSets,
                        dummy,
                        lvlset::SMOOTHING_SCHEME(ProcessParameter.smoothing_material_level,
                                                 ProcessParameter.smoothing_max_curvature,
                                                 ProcessParameter.smoothing_min_curvature),
                        Parameter.cfl_condition,
                        std::numeric_limits<double>::max(),
                        Coverages,
                        Model.CoverageStorageSize);
                counter++;
            } while (time_step!=std::numeric_limits<double>::max() && counter < ProcessParameter.smoothing_max_iterations);
            if (time_step!=std::numeric_limits<double>::max()) {
                msg::print_message("maximum number of iterations reached during smoothing operation");
            }
            TimeSmoothing+=my::time::GetTime();
        }

        /* //Output statistics for level sets
        if (Parameter.print_statistics) {
            TimeTotalExclOutput+=my::time::GetTime();
            int i=0;
            for (typename LevelSetsType::iterator it=LevelSets.begin();it!=LevelSets.end();++it) {
                std::ostringstream tmp;
                tmp << Parameter.output_path << "StatisticsLevelSet" << i << ".cvs";
                lvlset::misc::PrintStatistics(*it, tmp.str());
                i++;
            }
            TimeTotalExclOutput-=my::time::GetTime();
        } */

        // material of each active point (needed for material-dependent reemission)
        if (Model.ReemissionIsMaterialDependent) {
#ifdef VERBOSE
            msg::print_message("determine top most layer");
#endif
            TimeMaterials-=my::time::GetTime();
            DetermineTopMostLayer(LevelSets, PointMaterials);
            TimeMaterials+=my::time::GetTime();
        }

        if (Model.CalculateConnectivities) {
#ifdef VERBOSE
            msg::print_message("calculate connectivities");
#endif
            TimeConnectivities-=my::time::GetTime();
            // returns (reduced graph vertex count, number of components)
            std::pair<unsigned int, unsigned int> x=CalculateConnectivities(LevelSets.back(), Connectivities, Parameter.open_boundary_negative);
            graph_size=x.first;
            num_components=x.second;
            TimeConnectivities+=my::time::GetTime();
        }

        if (Model.CalculateVisibilities) {
#ifdef VERBOSE
            msg::print_message("calculate visibilities");
#endif
            TimeVisibilities-=my::time::GetTime();
            CalculateVisibilities(LevelSets.back(), Visibilities, Parameter.open_boundary, Parameter.open_boundary_negative);
            TimeVisibilities+=my::time::GetTime();
        }

        if ((Model.CalculateNormalVectors) || (Model.NumberOfParticleTypes>0)){
#ifdef VERBOSE
            msg::print_message("expansion");
#endif
            TimeExpansion-=my::time::GetTime();
            LevelSets.back().expand(3);
            TimeExpansion+=my::time::GetTime();
#ifdef VERBOSE
            msg::print_message("normal vector calculation");
#endif
            TimeNormals-=my::time::GetTime();
            calc::CalculateNormalVectors(LevelSets.back(), NormalVectors, DistancesToReceiver, Parameter.open_boundary, Parameter.open_boundary_negative, Parameter.receptor_radius, lvlset::vec<double,D>(Parameter.default_disc_orientation));
            TimeNormals+=my::time::GetTime();
        }

        // Monte Carlo rate calculation (only if the model emits particles);
        // MaxStep limits the following time integration step
        double MaxStep=0;
        if (Model.NumberOfParticleTypes>0) {
#ifdef VERBOSE
            msg::print_message("start monte carlo");
#endif
            std::vector<lvlset::vec<int,ParameterType::Dimension > > CellCoordinates;
            TimeExpansion-=my::time::GetTime();
            LevelSets.back().add_voxel_corners();
            TimeExpansion+=my::time::GetTime();
            TimeCells-=my::time::GetTime();
            calc::SetupCells(LevelSets.back(),Cells, CellCoordinates, NormalVectors, DistancesToReceiver, Parameter.receptor_radius);
            TimeCells+=my::time::GetTime();
            typedef typename calc::PartitionTraits<ParameterType> tmp_type;

#ifdef COMPILE_PARTITION_NEIGHBOR_LINKS_ARRAYS
            if (ProcessParameter.partition_data_structure==partition::NEIGHBOR_LINKS_ARRAYS) {
                partition::NeighborLinksArrays<tmp_type> Partition;
                TimePartition-=my::time::GetTime();
                Partition.Setup(0, Cells.size(), CellCoordinates, LevelSets.back().grid().boundary_conditions(),ProcessParameter.partition_splitting_strategy,ProcessParameter.partition_surface_area_heuristic_lambda);
                TimePartition+=my::time::GetTime();
                ray_tracing_memory=Partition.get_memory();
                if (Parameter.print_statistics) {
                    TimeTotalExclOutput+=my::time::GetTime();
                    Partition.PrintStatistics(Parameter.output_path+"StatisiticsPartition.cvs");
                    TimeTotalExclOutput-=my::time::GetTime();
                }
                TimeRates-=my::time::GetTime();
                // iterate rate calculation and coverage update init_cycles+1 times
                do {
                    calc::CalculateRates(Model,Parameter,Partition,LevelSets.back(),NormalVectors,DistancesToReceiver,Coverages,Rates,PointMaterials,Cells,RelativeTime);
                    //std::cout << "RelativeTime = " << RelativeTime << "\n";
                    calc::UpdateCoverages(Rates, Coverages, Model, MaxStep);//, RelativeTime);
                    // //std::cout << "MaxStep = " << MaxStep << "\n";
                    init_cycles--;
                } while (init_cycles>=0);
                init_cycles=rec_cycles;
                TimeRates+=my::time::GetTime();
            }
#endif

#ifdef COMPILE_PARTITION_FULL_GRID
            if (ProcessParameter.partition_data_structure==partition::FULL_GRID) {
                partition::FullGrid<tmp_type> Partition;
                TimePartition-=my::time::GetTime();
                Partition.Setup(0, Cells.size(), CellCoordinates, LevelSets.back().grid().boundary_conditions(),ProcessParameter.partition_splitting_strategy,ProcessParameter.partition_surface_area_heuristic_lambda);
                TimePartition+=my::time::GetTime();
                ray_tracing_memory=Partition.get_memory();
                if (Parameter.print_statistics) {
                    TimeTotalExclOutput+=my::time::GetTime();
                    Partition.PrintStatistics(Parameter.output_path+"StatisiticsPartition.cvs");
                    TimeTotalExclOutput-=my::time::GetTime();
                }
                TimeRates-=my::time::GetTime();
                do {
                    calc::CalculateRates(Model,Parameter,Partition,LevelSets.back(),NormalVectors,DistancesToReceiver,Coverages,Rates,PointMaterials,Cells,RelativeTime);
                    calc::UpdateCoverages(Rates, Coverages, Model, MaxStep);//, RelativeTime);
                    init_cycles--;
                } while (init_cycles>=0);
                init_cycles=rec_cycles;
                TimeRates+=my::time::GetTime();
            }
#endif

#ifdef COMPILE_UP_DOWN_LINKED_TREE
            if (ProcessParameter.partition_data_structure==partition::UP_DOWN_LINKED_TREE) {
                partition::UpDownLinkTree<tmp_type> Partition;
                TimePartition-=my::time::GetTime();
                Partition.Setup(0, Cells.size(), CellCoordinates, LevelSets.back().grid().boundary_conditions(),ProcessParameter.partition_splitting_strategy,ProcessParameter.partition_surface_area_heuristic_lambda);
                TimePartition+=my::time::GetTime();
                ray_tracing_memory=Partition.get_memory();
                if (Parameter.print_statistics) {
                    TimeTotalExclOutput+=my::time::GetTime();
                    Partition.PrintStatistics(Parameter.output_path+"StatisiticsPartition.cvs");
                    TimeTotalExclOutput-=my::time::GetTime();
                }
                TimeRates-=my::time::GetTime();
                do {
                    calc::CalculateRates(Model,Parameter,Partition,LevelSets.back(),NormalVectors,DistancesToReceiver,Coverages,Rates,PointMaterials,Cells,RelativeTime);
                    calc::UpdateCoverages(Rates, Coverages, Model, MaxStep);//, RelativeTime);
                    init_cycles--;
                } while (init_cycles>=0);
                init_cycles=rec_cycles;
                TimeRates+=my::time::GetTime();
            }
#endif
        }

        //#######################################
        // output
        //#######################################
        TimeTotalExclOutput+=my::time::GetTime();
        TimeOutput-=my::time::GetTime();

        if (MakeOutput) {
#ifdef VERBOSE
            msg::print_message("make output");
#endif
            // accessor that attaches the per-point series (velocity, coverages,
            // rates, material) to the written surface
            DataAccessClass<ModelType, ParameterType::Dimension> Data(  Model,
                                    &Coverages[0],
                                    &Rates[0],
                                    &NormalVectors[0],
                                    PointMaterials,
                                    Connectivities,
                                    Visibilities,
                                    ProcessParameter.print_velocities || Parameter.print_velocities,
                                    ProcessParameter.print_coverages || Parameter.print_coverages,
                                    ProcessParameter.print_rates || Parameter.print_rates,
                                    ProcessParameter.print_materials || Parameter.print_materials
                                    );
            {
                std::ostringstream oss;
                oss << "Writing output " << output_info.output_counter;
                oss << " (time = " << RelativeTime << ")...";
                msg::print_start(oss.str());
            }
            typename LevelSetsType::iterator it=LevelSets.begin();
            for (unsigned int i=0;i<LevelSets.size();i++) {
                it->prune();
                if (Parameter.print_dx) {
                    std::ostringstream oss;
                    oss << Parameter.output_path<< output_info.file_name <<"_" << i << "_" << output_info.output_counter << ".dx";
#ifdef VERBOSE
                    msg::print_message("print dx");
#endif
                    // only the topmost level set carries the series data
                    if (i!=LevelSets.size()-1) {
                        write_explicit_surface_opendx(*it,oss.str());
                    } else {
                        write_explicit_surface_opendx(*it,oss.str(), Data);
                    }
                }
                if (Parameter.print_vtk) {
                    std::ostringstream oss;
                    oss << Parameter.output_path<< output_info.file_name <<"_" << i << "_" << output_info.output_counter << ".vtk";
#ifdef VERBOSE
                    msg::print_message("print vtk");
#endif
                    // only the topmost level set carries the series data
                    if (i!=LevelSets.size()-1) {
                        write_explicit_surface_vtk(*it,oss.str());
                    } else {
                        write_explicit_surface_vtk(*it,oss.str(), Data);
                    }
                }
                if (Parameter.print_lvst) {
                    std::ostringstream oss;
                    oss << Parameter.output_path<< output_info.file_name <<"_" << i << "_" << output_info.output_counter << ".lvst";
#ifdef VERBOSE
                    msg::print_message("print lvst");
#endif
                    it->export_levelset(oss.str(), Parameter.bits_per_distance);
                }
                it++;
            }
            output_info.output_counter++;
            msg::print_done();
        }
        TimeOutput+=my::time::GetTime();
        TimeTotalExclOutput-=my::time::GetTime();

        // //std::cout << "Relative Time: " << RelativeTime << "\n";

        bool is_finished=(RelativeTime==ProcessTime);

        //#######################################
        // time integration
        //#######################################
#ifdef VERBOSE
        msg::print_message("time integration");
#endif
        double time_step=0;
        if (!is_finished) {
            //determine next time stop
            double NextTimeStop=std::min(ProcessTime, std::min(RelativeTime+ProcessParameter.MaxTimeStep,RelativeTime+MaxStep));
            if (OutputTimesIter!=OutputTimes.end()) NextTimeStop=std::min(NextTimeStop, *OutputTimesIter);
            double MaxTimeStep=NextTimeStop-RelativeTime;
            // //std::cout << "MaxTimeStep = " << MaxTimeStep << "\n";

            if (ProcessParameter.FiniteDifferenceScheme==ENGQUIST_OSHER_1ST_ORDER) {
                VelocityClass2<ModelType, ParameterType::Dimension> Velocities(Model, &NormalVectors[0], &Coverages[0], &Rates[0], Connectivities, Visibilities);
                LevelSetsType& LevelSets_temp=LevelSets;
                TimeExpansion-=my::time::GetTime();
                LevelSets_temp.back().expand(3);
                TimeExpansion+=my::time::GetTime();
                TimeTimeIntegration-=my::time::GetTime();
                time_step=lvlset::time_integrate(
                        LevelSets_temp,
                        Velocities,
                        lvlset::ENGQUIST_OSHER_SV_1ST_ORDER,
                        Parameter.cfl_condition,
                        MaxTimeStep,
                        Coverages,
                        Model.CoverageStorageSize);
                // if (time_step == MaxTimeStep) {
                //     LevelSets.back().expand(3);
                //     LevelSets=LevelSets_temp;
                // } else {
                //     continue;
                // }
                TimeTimeIntegration+=my::time::GetTime();
            } else if (ProcessParameter.FiniteDifferenceScheme==ENGQUIST_OSHER_2ND_ORDER) {
                VelocityClass2<ModelType, ParameterType::Dimension> Velocities(Model, &NormalVectors[0], &Coverages[0], &Rates[0], Connectivities, Visibilities);
                TimeExpansion-=my::time::GetTime();
                LevelSets.back().expand(5);     // 2nd order scheme needs the wider stencil
                TimeExpansion+=my::time::GetTime();
                TimeTimeIntegration-=my::time::GetTime();
                time_step=lvlset::time_integrate(
                        LevelSets,
                        Velocities,
                        lvlset::ENGQUIST_OSHER_SV_2ND_ORDER,
                        Parameter.cfl_condition,
                        MaxTimeStep,
                        Coverages,
                        Model.CoverageStorageSize);
                TimeTimeIntegration+=my::time::GetTime();
            } else if (ProcessParameter.FiniteDifferenceScheme==LAX_FRIEDRICHS_1ST_ORDER) {     //TODO
                // scalar velocity functor (VelocityClass, not VelocityClass2)
                VelocityClass<ModelType, ParameterType::Dimension> Velocities(Model, &NormalVectors[0], &Coverages[0], &Rates[0], Connectivities, Visibilities);
                TimeExpansion-=my::time::GetTime();
                LevelSets.back().expand(3);
                TimeExpansion+=my::time::GetTime();
                TimeTimeIntegration-=my::time::GetTime();
                time_step=lvlset::time_integrate(
                        LevelSets,
                        Velocities,
                        lvlset::LAX_FRIEDRICHS_SCALAR_1ST_ORDER(ProcessParameter.LaxFriedrichsDissipationCoefficient),
                        Parameter.cfl_condition,
                        MaxTimeStep,
                        Coverages,
                        Model.CoverageStorageSize);
                TimeTimeIntegration+=my::time::GetTime();
            } else assert(0);

            // clamp exactly onto the next stop so output times are hit precisely
            if (time_step>=MaxTimeStep) {
                assert(time_step==MaxTimeStep);
                time_step=MaxTimeStep;
                RelativeTime=NextTimeStop;
            } else {
                RelativeTime+=time_step;
            }
        }

        TimeTotalExclOutput+=my::time::GetTime();
        TimeTotalInclOutput+=my::time::GetTime();

        //#######################################
        // print statistics
        //#######################################
        if (Parameter.print_statistics) {
#ifdef VERBOSE
            msg::print_message("print statistics");
#endif
            f.open(TimeStatFileName.c_str(),std::ios_base::app);
            f<<TimeExpansion        <<";";
            f<<TimeNormals          <<";";
            f<<TimeMaterials        <<";";
            f<<TimeConnectivities   <<";";
            f<<graph_size           <<";";
            f<<num_components       <<";";
            f<<TimeSmoothing        <<";";
            f<<TimeVisibilities     <<";";
            f<<TimeCells            <<";";
            f<<TimePartition        <<";";
            f<<TimeRates            <<";";
            f<<ray_tracing_memory   <<";";
            f<<TimeTimeIntegration  <<";";
            f<<MakeOutput           <<";";
            f<<TimeOutput           <<";";
            f<<TimeTotalExclOutput  <<";";
            f<<TimeTotalInclOutput  <<";";
            f<<time_step            <<";";
            f<<RelativeTime         <<";";
            f<<(ProcessTime-RelativeTime) << std::endl;
            f.close();
        }

        if (is_finished) break;
    }
}

///Includes loop over full process time to run the simulation.
///(Same as the overload above, but owns its Coverages vector and additionally
///supports volume output times. The function continues past this chunk.)
template <class LevelSetsType, class ModelType, class ParameterType, class ProcessParameterType, class OutputInfoType>
void ExecuteProcess(
        LevelSetsType& LevelSets,
        const ModelType& Model,
        const ParameterType& Parameter,
        const ProcessParameterType& ProcessParameter,
        OutputInfoType & output_info
) {
    const int D=LevelSetsType::value_type::dimensions;

    const std::vector<double> & OutputTimes=ProcessParameter.output_times;      //vector of times when output will be recorded
    const std::vector<double> & OutputVolume=ProcessParameter.output_volume;    //vector of times for volume output
    std::vector<double>::const_iterator OutputTimesIter = OutputTimes.begin();
    std::vector<double>::const_iterator OutputVolumeIter = OutputVolume.begin();
    //std::lower_bound(OutputTimes.begin(), OutputTimes.end(), AbsoluteTime);

    //----------------------------------------------------------------------------------------------------------------------------------------
    //    while (LevelSets.size()>1) {
    //        LevelSets.pop_back();
    //    }
    //    typedef typename LevelSetsType::value_type LevelSetType;
    //    LevelSets.push_front(LevelSetType(LevelSets.back().grid(), 0, Parameter.open_boundary, !Parameter.open_boundary_negative));
    //----------------------------------------------------------------------------------------------------------------------------------------

    int init_cycles=ProcessParameter.StartIterationCycles;  //number of initial iteration cycles
    int rec_cycles=ProcessParameter.IterationCycles;        //number of subsequent iteration cycles

    geom::cells<ParameterType::Dimension> Cells;
    std::vector<double> Coverages(std::max(LevelSets.back().num_active_pts()* Model.CoverageStorageSize,1u),0.);
    std::vector<double> Rates(1,0);
    std::vector<double> NormalVectors;
    std::vector<double> DistancesToReceiver;
    std::vector<unsigned int> PointMaterials;
    std::vector<bool> Connectivities;
    std::vector<bool> Visibilities;

    //time statistics
    const std::string TimeStatFileName=Parameter.output_path + "StatisticsTimes.cvs";
    std::ofstream f;
    //unsigned int LineNumber;
    if (Parameter.print_statistics) {
        // write the CSV header only once (when the file does not exist yet)
        if(!std::ifstream(TimeStatFileName.c_str())) {
#ifdef VERBOSE
            msg::print_message("Print Header in StatisticsTimes.cvs");
#endif
            f.open(TimeStatFileName.c_str());
            f << "Time for expansion" <<";";
            f << "Time for normal vector calc." <<";";
            f << "Determining materials" <<";";
            f << "Determining connectivities" <<";";
            f << "Reduced graph num vertices" <<";";
            f << "num componenets" <<";";
            f << "Time for smoothing" <<";";
            f << "Determining visibilities" <<";";
            f << "Setup active cells" <<";";
            f << "Setup partition" <<";";
            f << "Rate calculation" <<";";
            f << "Memory Ray Tracing Data Structure"<<";";
            f << "Level set time integration" <<";";
            f << "Output" <<";";
            f << "Time for Output" <<";";
            f << "Total time step excl. Output" <<";";
            f << "Total time step incl. Output" <<";";
            // NOTE(review): the last data columns written per row have no
            // header entries, and no newline terminates the header row while
            // these stay commented out - confirm intent.
            //TODO f << "Chosen time step" <<";";
            //TODO f << "Time" <<";";
            //TODO f << "Left Time" <<std::endl;
            f.close();
        }
    }

    const double & ProcessTime = ProcessParameter.ProcessTime;
    double RelativeTime=0;
    //while ((OutputTimesIter!=OutputTimes.end()) && (RelativeTime>*OutputTimesIter)) ++OutputTimesIter;

#ifdef VERBOSE
    msg::print_message("Start loop over time");
#endif

    // main time loop: one pass per (possibly zero-length) time step
    while(true) {
        double TimeTotalExclOutput=-my::time::GetTime();
        double TimeTotalInclOutput=-my::time::GetTime();
        double TimeExpansion=0;
        double TimeNormals=0;
        double TimeMaterials=0;
        double TimeCells=0;
        double TimePartition=0;
        double TimeRates=0;
        double TimeTimeIntegration=0;
        double TimeOutput=0;
        double TimeConnectivities=0;
        double TimeVisibilities=0;
        double TimeSmoothing=0;
        double ray_tracing_memory=0;
        unsigned int graph_size=0;
        unsigned int num_components=0;

        // surface output is due exactly at the next output time
        bool MakeOutput=false;
        if (OutputTimesIter!=OutputTimes.end()) {
            assert(RelativeTime<=*OutputTimesIter);
            if (RelativeTime==*OutputTimesIter) {
                MakeOutput=true;
                OutputTimesIter++;
            }
        }

        //VOLUME OUTPUT
        bool VolumeOutput=false;
        if(OutputVolumeIter!=OutputVolume.end()){
            assert(RelativeTime<=*OutputVolumeIter);
            if(RelativeTime==*OutputVolumeIter){
                VolumeOutput=true;
                OutputVolumeIter++;
            }
        }

        //if ((RelativeTime==EndTime) && (ProcessParameter.final_output)) MakeOutput=true;
        //if ((RelativeTime==StartTime) && (ProcessParameter.initial_output)) MakeOutput=true;

        if (!MakeOutput && !VolumeOutput) if (RelativeTime==ProcessTime) break;

        //###########################
        // smooth surface level set
        //###########################
        if (ProcessParameter.smoothing_material_level>0) {
#ifdef VERBOSE
            msg::print_message("smoothing");
#endif
            TimeSmoothing-=my::time::GetTime();
            double time_step;
            int dummy;
            int counter=0;
            // iterate the smoothing scheme until it reports no further change
            do {
                time_step=lvlset::time_integrate(
                        LevelSets,
                        dummy,
                        lvlset::SMOOTHING_SCHEME(ProcessParameter.smoothing_material_level,
                                                 ProcessParameter.smoothing_max_curvature,
                                                 ProcessParameter.smoothing_min_curvature),
                        Parameter.cfl_condition,
std::numeric_limits<double>::max(), Coverages, Model.CoverageStorageSize); counter++; } while (time_step!=std::numeric_limits<double>::max() && counter < ProcessParameter.smoothing_max_iterations); if (time_step!=std::numeric_limits<double>::max()) { msg::print_message("maximum number of iterations reached during smoothing operation"); } TimeSmoothing+=my::time::GetTime(); } /* //Output statistics for level sets if (Parameter.print_statistics) { TimeTotalExclOutput+=my::time::GetTime(); int i=0; for (typename LevelSetsType::iterator it=LevelSets.begin();it!=LevelSets.end();++it) { std::ostringstream tmp; tmp << Parameter.output_path << "StatisticsLevelSet" << i << ".cvs"; lvlset::misc::PrintStatistics(*it, tmp.str()); i++; } TimeTotalExclOutput-=my::time::GetTime(); } */ if (Model.ReemissionIsMaterialDependent) { #ifdef VERBOSE msg::print_message("determine top most layer"); #endif TimeMaterials-=my::time::GetTime(); DetermineTopMostLayer(LevelSets, PointMaterials); TimeMaterials+=my::time::GetTime(); } if (Model.CalculateConnectivities) { #ifdef VERBOSE msg::print_message("calculate connectivities"); #endif TimeConnectivities-=my::time::GetTime(); std::pair<unsigned int, unsigned int> x=CalculateConnectivities(LevelSets.back(), Connectivities, Parameter.open_boundary_negative); graph_size=x.first; num_components=x.second; TimeConnectivities+=my::time::GetTime(); } if (Model.CalculateVisibilities) { #ifdef VERBOSE msg::print_message("calculate visibilities"); #endif TimeVisibilities-=my::time::GetTime(); CalculateVisibilities(LevelSets.back(), Visibilities, Parameter.open_boundary, Parameter.open_boundary_negative); TimeVisibilities+=my::time::GetTime(); } if ((Model.CalculateNormalVectors) || (Model.NumberOfParticleTypes>0)){ #ifdef VERBOSE msg::print_message("expansion"); #endif TimeExpansion-=my::time::GetTime(); LevelSets.back().expand(3); TimeExpansion+=my::time::GetTime(); #ifdef VERBOSE msg::print_message("normal vector calculation"); #endif 
TimeNormals-=my::time::GetTime(); calc::CalculateNormalVectors(LevelSets.back(), NormalVectors, DistancesToReceiver, Parameter.open_boundary, Parameter.open_boundary_negative, Parameter.receptor_radius, lvlset::vec<double,D>(Parameter.default_disc_orientation)); TimeNormals+=my::time::GetTime(); } if (Model.NumberOfParticleTypes>0) { #ifdef VERBOSE msg::print_message("start monte carlo"); #endif std::vector<lvlset::vec<int,ParameterType::Dimension > > CellCoordinates; TimeExpansion-=my::time::GetTime(); LevelSets.back().add_voxel_corners(); TimeExpansion+=my::time::GetTime(); TimeCells-=my::time::GetTime(); calc::SetupCells(LevelSets.back(),Cells, CellCoordinates, NormalVectors, DistancesToReceiver, Parameter.receptor_radius); TimeCells+=my::time::GetTime(); typedef typename calc::PartitionTraits<ParameterType> tmp_type; #ifdef COMPILE_PARTITION_NEIGHBOR_LINKS_ARRAYS if (ProcessParameter.partition_data_structure==partition::NEIGHBOR_LINKS_ARRAYS) { partition::NeighborLinksArrays<tmp_type> Partition; TimePartition-=my::time::GetTime(); Partition.Setup(0, Cells.size(), CellCoordinates, LevelSets.back().grid().boundary_conditions(),ProcessParameter.partition_splitting_strategy,ProcessParameter.partition_surface_area_heuristic_lambda); TimePartition+=my::time::GetTime(); ray_tracing_memory=Partition.get_memory(); if (Parameter.print_statistics) { TimeTotalExclOutput+=my::time::GetTime(); Partition.PrintStatistics(Parameter.output_path+"StatisiticsPartition.cvs"); TimeTotalExclOutput-=my::time::GetTime(); } TimeRates-=my::time::GetTime(); do { // std::cout << "calculate rates!\n"; calc::CalculateRates(Model,Parameter,Partition,LevelSets.back(),NormalVectors,DistancesToReceiver,Coverages,Rates,PointMaterials,Cells,RelativeTime); // std::cout << "update coverages!\n"; calc::UpdateCoverages(Rates, Coverages, Model); init_cycles--; } while (init_cycles>=0); init_cycles=rec_cycles; TimeRates+=my::time::GetTime(); } #endif #ifdef COMPILE_PARTITION_FULL_GRID if 
(ProcessParameter.partition_data_structure==partition::FULL_GRID) { partition::FullGrid<tmp_type> Partition; TimePartition-=my::time::GetTime(); Partition.Setup(0, Cells.size(), CellCoordinates, LevelSets.back().grid().boundary_conditions(),ProcessParameter.partition_splitting_strategy,ProcessParameter.partition_surface_area_heuristic_lambda); TimePartition+=my::time::GetTime(); ray_tracing_memory=Partition.get_memory(); if (Parameter.print_statistics) { TimeTotalExclOutput+=my::time::GetTime(); Partition.PrintStatistics(Parameter.output_path+"StatisiticsPartition.cvs"); TimeTotalExclOutput-=my::time::GetTime(); } TimeRates-=my::time::GetTime(); do { calc::CalculateRates(Model,Parameter,Partition,LevelSets.back(),NormalVectors,DistancesToReceiver,Coverages,Rates,PointMaterials,Cells,RelativeTime); calc::UpdateCoverages(Rates, Coverages, Model); init_cycles--; } while (init_cycles>=0); init_cycles=rec_cycles; TimeRates+=my::time::GetTime(); } #endif #ifdef COMPILE_UP_DOWN_LINKED_TREE if (ProcessParameter.partition_data_structure==partition::UP_DOWN_LINKED_TREE) { partition::UpDownLinkTree<tmp_type> Partition; TimePartition-=my::time::GetTime(); Partition.Setup(0, Cells.size(), CellCoordinates, LevelSets.back().grid().boundary_conditions(),ProcessParameter.partition_splitting_strategy,ProcessParameter.partition_surface_area_heuristic_lambda); TimePartition+=my::time::GetTime(); ray_tracing_memory=Partition.get_memory(); if (Parameter.print_statistics) { TimeTotalExclOutput+=my::time::GetTime(); Partition.PrintStatistics(Parameter.output_path+"StatisiticsPartition.cvs"); TimeTotalExclOutput-=my::time::GetTime(); } TimeRates-=my::time::GetTime(); do { calc::CalculateRates(Model,Parameter,Partition,LevelSets.back(),NormalVectors,DistancesToReceiver,Coverages,Rates,PointMaterials,Cells,RelativeTime); calc::UpdateCoverages(Rates, Coverages, Model); init_cycles--; } while (init_cycles>=0); init_cycles=rec_cycles; TimeRates+=my::time::GetTime(); } #endif } 
//####################################### // output //####################################### TimeTotalExclOutput+=my::time::GetTime(); TimeOutput-=my::time::GetTime(); if (MakeOutput) { #ifdef VERBOSE msg::print_message("make output"); #endif DataAccessClass<ModelType, ParameterType::Dimension> Data( Model, &Coverages[0], &Rates[0], &NormalVectors[0], PointMaterials, Connectivities, Visibilities, ProcessParameter.print_velocities || Parameter.print_velocities, ProcessParameter.print_coverages || Parameter.print_coverages, ProcessParameter.print_rates || Parameter.print_rates, ProcessParameter.print_materials || Parameter.print_materials ); { std::ostringstream oss; oss << "Writing output " << output_info.output_counter; oss << " (time = " << RelativeTime << ")..."; msg::print_start(oss.str()); } typename LevelSetsType::iterator it=LevelSets.begin(); for (unsigned int i=0;i<LevelSets.size();i++) { //for each levelset remove non opposite signed neighbors before outputting it to a file it->prune(); if (Parameter.print_dx) { std::ostringstream oss; oss << Parameter.output_path<< output_info.file_name <<"_" << i << "_" << output_info.output_counter << ".dx"; #ifdef VERBOSE msg::print_message("print dx"); #endif if (i!=LevelSets.size()-1) { write_explicit_surface_opendx(*it,oss.str()); } else { write_explicit_surface_opendx(*it,oss.str(), Data); } } if (Parameter.print_vtk) { std::ostringstream oss; oss << Parameter.output_path<< output_info.file_name <<"_" << i << "_" << output_info.output_counter << ".vtk"; #ifdef VERBOSE msg::print_message("print vtk"); #endif if (i!=LevelSets.size()-1) { write_explicit_surface_vtk(*it,oss.str()); } else { write_explicit_surface_vtk(*it,oss.str(), Data); } } if(Parameter.print_vtp){ std::ostringstream oss; oss << Parameter.output_path<< output_info.file_name <<"_" << i << "_" << output_info.output_counter << ".vtp"; #ifdef VERBOSE msg::print_message("print vtp"); #endif if (i!=LevelSets.size()-1) { 
write_explicit_surface_vtp(*it,oss.str()); } else { write_explicit_surface_vtp(*it,oss.str(), Data); } } if (Parameter.print_lvst) { std::ostringstream oss; oss << Parameter.output_path<< output_info.file_name <<"_" << i << "_" << output_info.output_counter << ".lvst"; #ifdef VERBOSE msg::print_message("print lvst"); #endif it->export_levelset(oss.str(), Parameter.bits_per_distance); } it++; } if(!VolumeOutput) output_info.output_counter++; msg::print_done(); } if(VolumeOutput){ { std::ostringstream oss; oss << "Writing volume " << output_info.output_counter; oss << " (time = " << RelativeTime << ")..."; msg::print_start(oss.str()); } lvlset::write_explicit_volume_vtk(LevelSets, output_info.output_counter, Parameter); output_info.output_counter++; msg::print_done(); } TimeOutput+=my::time::GetTime(); TimeTotalExclOutput-=my::time::GetTime(); // //std::cout << "Relative Time: " << RelativeTime << "\n"; bool is_finished=(RelativeTime==ProcessTime); //####################################### // time integration //####################################### #ifdef VERBOSE msg::print_message("time integration"); #endif double time_step=0; if (!is_finished) { //determine next time stop double NextTimeStop=std::min(ProcessTime, RelativeTime+ProcessParameter.MaxTimeStep); if (OutputTimesIter!=OutputTimes.end()) NextTimeStop=std::min(NextTimeStop, *OutputTimesIter); double MaxTimeStep=NextTimeStop-RelativeTime; if (ProcessParameter.FiniteDifferenceScheme==ENGQUIST_OSHER_1ST_ORDER) { VelocityClass2<ModelType, ParameterType::Dimension> Velocities(Model, &NormalVectors[0], &Coverages[0], &Rates[0], Connectivities, Visibilities); TimeExpansion-=my::time::GetTime(); LevelSets.back().expand(3); TimeExpansion+=my::time::GetTime(); TimeTimeIntegration-=my::time::GetTime(); time_step=lvlset::time_integrate( LevelSets, Velocities, lvlset::ENGQUIST_OSHER_SV_1ST_ORDER, Parameter.cfl_condition, MaxTimeStep, Coverages, Model.CoverageStorageSize); TimeTimeIntegration+=my::time::GetTime(); } 
else if (ProcessParameter.FiniteDifferenceScheme==ENGQUIST_OSHER_2ND_ORDER) { VelocityClass2<ModelType, ParameterType::Dimension> Velocities(Model, &NormalVectors[0], &Coverages[0], &Rates[0], Connectivities, Visibilities); TimeExpansion-=my::time::GetTime(); LevelSets.back().expand(5); TimeExpansion+=my::time::GetTime(); TimeTimeIntegration-=my::time::GetTime(); time_step=lvlset::time_integrate( LevelSets, Velocities, lvlset::ENGQUIST_OSHER_SV_2ND_ORDER, Parameter.cfl_condition, MaxTimeStep, Coverages, Model.CoverageStorageSize); TimeTimeIntegration+=my::time::GetTime(); } else if (ProcessParameter.FiniteDifferenceScheme==LAX_FRIEDRICHS_1ST_ORDER) { //TODO VelocityClass<ModelType, ParameterType::Dimension> Velocities(Model, &NormalVectors[0], &Coverages[0], &Rates[0], Connectivities, Visibilities); TimeExpansion-=my::time::GetTime(); LevelSets.back().expand(3); TimeExpansion+=my::time::GetTime(); TimeTimeIntegration-=my::time::GetTime(); time_step=lvlset::time_integrate( LevelSets, Velocities, lvlset::LAX_FRIEDRICHS_SCALAR_1ST_ORDER(ProcessParameter.LaxFriedrichsDissipationCoefficient), Parameter.cfl_condition, MaxTimeStep, Coverages, Model.CoverageStorageSize); TimeTimeIntegration+=my::time::GetTime(); } else assert(0); if (time_step>=MaxTimeStep) { assert(time_step==MaxTimeStep); time_step=MaxTimeStep; RelativeTime=NextTimeStop; } else { RelativeTime+=time_step; } } TimeTotalExclOutput+=my::time::GetTime(); TimeTotalInclOutput+=my::time::GetTime(); //####################################### // print statistics //####################################### if (Parameter.print_statistics) { #ifdef VERBOSE msg::print_message("print statistics"); #endif f.open(TimeStatFileName.c_str(),std::ios_base::app); f<<TimeExpansion <<";"; f<<TimeNormals <<";"; f<<TimeMaterials <<";"; f<<TimeConnectivities <<";"; f<<graph_size <<";"; f<<num_components <<";"; f<<TimeSmoothing <<";"; f<<TimeVisibilities <<";"; f<<TimeCells <<";"; f<<TimePartition <<";"; f<<TimeRates <<";"; 
f<<ray_tracing_memory <<";"; f<<TimeTimeIntegration <<";"; f<<MakeOutput <<";"; f<<TimeOutput <<";"; f<<TimeTotalExclOutput <<";"; f<<TimeTotalInclOutput <<";"; f<<time_step <<";"; f<<RelativeTime <<";"; f<<(ProcessTime-RelativeTime) << std::endl; f.close(); } if (is_finished) break; } } } #endif /*PROCESS_H_*/
GB_binop__min_int32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__min_int32)
// A.*B function (eWiseMult):       GB (_AemultB_08__min_int32)
// A.*B function (eWiseMult):       GB (_AemultB_02__min_int32)
// A.*B function (eWiseMult):       GB (_AemultB_04__min_int32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__min_int32)
// A*D function (colscale):         GB (_AxD__min_int32)
// D*A function (rowscale):         GB (_DxB__min_int32)
// C+=B function (dense accum):     GB (_Cdense_accumB__min_int32)
// C+=b function (dense accum):     GB (_Cdense_accumb__min_int32)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__min_int32)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__min_int32)
// C=scalar+B                       GB (_bind1st__min_int32)
// C=scalar+B'                      GB (_bind1st_tran__min_int32)
// C=A+scalar                       GB (_bind2nd__min_int32)
// C=A'+scalar                      GB (_bind2nd_tran__min_int32)

// C type:   int32_t
// A type:   int32_t
// A pattern? 0
// B type:   int32_t
// B pattern? 0

// BinaryOp: cij = GB_IMIN (aij, bij)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_IMIN (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MIN || GxB_NO_INT32 || GxB_NO_MIN_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__min_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__min_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__min_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__min_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__min_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__min_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__min_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // eWiseUnion uses alpha/beta in place of missing entries of A/B
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__min_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__min_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__min_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__min_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__min_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap pattern Bb
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IMIN (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__min_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap pattern Ab
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IMIN (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_IMIN (x, aij) ;                \
}

GrB_Info GB (_bind1st_tran__min_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_IMIN (aij, y) ;                \
}

GrB_Info GB (_bind2nd_tran__min_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
threading.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_UTILS_THREADING_H_ #define LIGHTGBM_UTILS_THREADING_H_ #include <LightGBM/utils/openmp_wrapper.h> #include <functional> #include <vector> namespace LightGBM { class Threading { public: template<typename INDEX_T> static inline void For(INDEX_T start, INDEX_T end, const std::function<void(int, INDEX_T, INDEX_T)>& inner_fun) { int num_threads = 1; #pragma omp parallel #pragma omp master { num_threads = omp_get_num_threads(); } INDEX_T num_inner = (end - start + num_threads - 1) / num_threads; if (num_inner <= 0) { num_inner = 1; } OMP_INIT_EX(); #pragma omp parallel for schedule(static, 1) for (int i = 0; i < num_threads; ++i) { OMP_LOOP_EX_BEGIN(); INDEX_T inner_start = start + num_inner * i; INDEX_T inner_end = inner_start + num_inner; if (inner_end > end) { inner_end = end; } if (inner_start < end) { inner_fun(i, inner_start, inner_end); } OMP_LOOP_EX_END(); } OMP_THROW_EX(); } }; } // namespace LightGBM #endif // LightGBM_UTILS_THREADING_H_
SpatialAveragePooling.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialAveragePooling.c"
#else

/* Validate kernel/stride/pad arguments and tensor shapes for 2-D average
 * pooling.  Accepts 3-D (C,H,W) or 4-D (N,C,H,W) non-empty inputs; when
 * gradOutput is non-NULL its dimensions are checked against the computed
 * output size.  Raises via THError/THArgCheck on any violation. */
static inline void THNN_(SpatialAveragePooling_shapeCheck)(
  THTensor *input, THTensor *gradOutput,
  int kH, int kW, int dH, int dW, int padH, int padW,
  bool ceil_mode) {

  THArgCheck(kW > 0 && kH > 0, 5,
             "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
  THArgCheck(dW > 0 && dH > 0, 8,
             "stride should be greater than zero, but got dH: %d dW: %d", dH, dW);

  int ndim = input->dim();
  int dimf = 0;   /* feature (channel) axis */
  int dimh = 1;   /* height axis */
  int dimw = 2;   /* width axis */

  /* 4-D input has a leading batch axis: shift all axes right by one. */
  if (ndim == 4) {
    dimf++;
    dimh++;
    dimw++;
  }

  THNN_ARGCHECK(!input->is_empty() && (ndim == 3 || ndim == 4), 2, input,
    "non-empty 3D or 4D input tensor expected but got: %s");

  THArgCheck(kW/2 >= padW && kH/2 >= padH, 2,
       "pad should be smaller than half of kernel size, but got "
       "padW = %d, padH = %d, kW = %d, kH = %d",
       padW, padH, kW, kH);

  int64_t nInputPlane = input->size(dimh-1);
  int64_t inputHeight = input->size(dimh);
  int64_t inputWidth = input->size(dimw);
  int64_t outputHeight, outputWidth;
  int64_t nOutputPlane = nInputPlane;   /* pooling preserves channel count */

  /* Standard pooling output-size formula; ceil_mode rounds up so trailing
   * partial windows still produce an output element. */
  if(ceil_mode)
  {
    outputHeight = (int64_t)(ceil((float)(inputHeight - kH + 2*padH) / dH)) + 1;
    outputWidth = (int64_t)(ceil((float)(inputWidth - kW + 2*padW) / dW)) + 1;
  }
  else
  {
    outputHeight = (int64_t)(floor((float)(inputHeight - kH + 2*padH) / dH)) + 1;
    outputWidth = (int64_t)(floor((float)(inputWidth - kW + 2*padW) / dW)) + 1;
  }

  if (padW || padH)
  {
    // ensure that the last pooling starts inside the image
    // needed to avoid problems in ceil mode
    if ((outputHeight - 1)*dH >= inputHeight + padH)
      --outputHeight;
    if ((outputWidth - 1)*dW >= inputWidth + padW)
      --outputWidth;
  }

  if (outputWidth < 1 || outputHeight < 1)
    THError("Given input size: (%dx%dx%d). "
            "Calculated output size: (%dx%dx%d). Output size is too small",
            nInputPlane,inputHeight,inputWidth,nInputPlane,outputHeight,outputWidth);

  if (gradOutput != NULL) {
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth);
  }
}

/* Forward pass of 2-D average pooling.
 * output[n][c][oy][ox] = mean of the (kH x kW) input window at stride
 * (dH, dW) with zero padding (padH, padW).  count_include_pad selects
 * whether padded cells count toward the divisor.  Parallelized over
 * channels with OpenMP. */
void THNN_(SpatialAveragePooling_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          int kW, int kH,
          int dW, int dH,
          int padW, int padH,
          bool ceil_mode,
          bool count_include_pad)
{
  real *output_data;
  real *input_data;

  int dimw = 2;
  int dimh = 1;
  int dimc = 0;
  int64_t nbatch = 1;

  int64_t inputWidth;
  int64_t inputHeight;
  int64_t outputWidth;
  int64_t outputHeight;
  int64_t nInputPlane; // number of channels (or colors)

  int64_t k;

  THNN_(SpatialAveragePooling_shapeCheck)
    (input, NULL, kH, kW, dH, dW, padH, padW, ceil_mode);

  /* 4-D input: leading batch axis shifts the channel/height/width axes. */
  if (input->dim() == 4) {
    nbatch = input->size(0);
    dimw++;
    dimh++;
    dimc++;
  }

  inputWidth = input->size(dimw);
  inputHeight = input->size(dimh);
  nInputPlane = input->size(dimc);

  /* Recompute output sizes (same formula as shapeCheck). */
  if(ceil_mode)
  {
    outputWidth = (int64_t)(ceil((float)(inputWidth - kW + 2*padW) / dW)) + 1;
    outputHeight = (int64_t)(ceil((float)(inputHeight - kH + 2*padH) / dH)) + 1;
  }
  else
  {
    outputWidth = (int64_t)(floor((float)(inputWidth - kW + 2*padW) / dW)) + 1;
    outputHeight = (int64_t)(floor((float)(inputHeight - kH + 2*padH) / dH)) + 1;
  }
  if (padW || padH)
  {
    // ensure that the last pooling starts inside the image
    // needed to avoid problems in ceil mode
    if ((outputHeight - 1)*dH >= inputHeight + padH)
      --outputHeight;
    if ((outputWidth - 1)*dW >= inputWidth + padW)
      --outputWidth;
  }

  if (input->dim() == 3)
    THTensor_(resize3d)(output, nInputPlane, outputHeight, outputWidth);
  else
    THTensor_(resize4d)(output, input->size(0), nInputPlane, outputHeight, outputWidth);

  input = THTensor_(newContiguous)(input);
  THArgCheck(THTensor_(isContiguous)(output), 3, "output must be contiguous");
  input_data = THTensor_(data)(input);
  output_data = THTensor_(data)(output);

#pragma omp parallel for private(k)
  for(k = 0; k < nInputPlane; k++)
  {
    int64_t p;
    for(p = 0; p < nbatch; p++)
    {
      int64_t xx, yy;
      /* For all output pixels... */
      real *ptr_output = output_data + p*nInputPlane*outputWidth*outputHeight + k*outputWidth*outputHeight;
      real *ptr_input = input_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight;
      int64_t i;
      for(i = 0; i < outputWidth*outputHeight; i++)
        ptr_output[i] = 0;

      for(yy = 0; yy < outputHeight; yy++)
      {
        for(xx = 0; xx < outputWidth; xx++)
        {
          /* Compute the mean of the input image... */
          int64_t hstart = yy * dH - padH;
          int64_t wstart = xx * dW - padW;
          /* NOTE(review): fminf/fmaxf operate in single-precision float;
           * for int64_t extents beyond 2^24 this can lose precision —
           * presumably fine for realistic image sizes, but worth confirming. */
          int64_t hend = fminf(hstart + kH, inputHeight + padH);
          int64_t wend = fminf(wstart + kW, inputWidth + padW);
          /* pool_size counts padded cells; used when count_include_pad. */
          int pool_size = (hend - hstart) * (wend - wstart);
          hstart = fmaxf(hstart, 0);
          wstart = fmaxf(wstart, 0);
          hend = fminf(hend, inputHeight);
          wend = fminf(wend, inputWidth);

          real sum = 0;

          int divide_factor;
          if(count_include_pad)
            divide_factor = pool_size;
          else
            divide_factor = (hend - hstart) * (wend - wstart);

          int64_t kx, ky;

          for(ky = hstart; ky < hend; ky++)
          {
            for(kx = wstart; kx < wend; kx++)
              sum += ptr_input[ky*inputWidth + kx];
          }
          /* Update output */
          *ptr_output++ += sum/divide_factor;
        }
      }
    }
  }
  THTensor_(free)(input);
}

/* Backward pass: scatter each gradOutput element, divided by the same
 * divisor used in the forward pass, back over its pooling window.
 * Parallelized over channels with OpenMP. */
void THNN_(SpatialAveragePooling_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          int kW, int kH,
          int dW, int dH,
          int padW, int padH,
          bool ceil_mode,
          bool count_include_pad)
{
  int dimw = 2;
  int dimh = 1;
  int dimc = 0;
  int64_t nbatch = 1;
  int64_t ndim = 3;

  int64_t inputWidth;
  int64_t inputHeight;
  int64_t outputWidth;
  int64_t outputHeight;
  int64_t nInputPlane; // number of channels (or colors)

  real *gradOutput_data;
  real *gradInput_data;

  int64_t k;

  THNN_(SpatialAveragePooling_shapeCheck)
    (input, gradOutput, kH, kW, dH, dW, padH, padW, ceil_mode);

  if (input->dim() == 4) {
    nbatch = input->size(0);
    dimw++;
    dimh++;
    dimc++;
    ndim = 4;
  }

  inputWidth = input->size(dimw);
  inputHeight = input->size(dimh);
  nInputPlane = input->size(dimc);

  if(ceil_mode)
  {
    outputWidth = (int64_t)(ceil((float)(inputWidth - kW + 2*padW) / dW)) + 1;
    outputHeight = (int64_t)(ceil((float)(inputHeight - kH + 2*padH) / dH)) + 1;
  }
  else
  {
    outputWidth = (int64_t)(floor((float)(inputWidth - kW + 2*padW) / dW)) + 1;
    outputHeight = (int64_t)(floor((float)(inputHeight - kH + 2*padH) / dH)) + 1;
  }
  if (padW || padH)
  {
    // ensure that the last pooling starts inside the image
    // needed to avoid problems in ceil mode
    if ((outputHeight - 1)*dH >= inputHeight + padH)
      --outputHeight;
    if ((outputWidth - 1)*dW >= inputWidth + padW)
      --outputWidth;
  }

  THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
  THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth);

  THTensor_(resizeAs)(gradInput, input);

  gradOutput = THTensor_(newContiguous)(gradOutput);
  THArgCheck(THTensor_(isContiguous)(gradInput), 4, "gradInput must be contiguous");

  gradInput_data = THTensor_(data)(gradInput);
  gradOutput_data = THTensor_(data)(gradOutput);

#pragma omp parallel for private(k)
  for(k = 0; k < nInputPlane; k++)
  {
    int64_t p;
    for(p = 0; p < nbatch; p++)
    {
      real *ptr_gradOutput = gradOutput_data + p*nInputPlane*outputHeight*outputWidth + k*outputWidth*outputHeight;
      int64_t xx, yy;

      /* ptr_gi and ptr_gradInput point at the same channel slab; one is used
       * for the zero-fill, the other for accumulation. */
      real* ptr_gi = gradInput_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight;

      real *ptr_gradInput = gradInput_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight;

      int64_t i;
      for(i=0; i<inputWidth*inputHeight; i++)
        ptr_gi[i] = 0.0;

      for(yy = 0; yy < outputHeight; yy++)
      {
        for(xx = 0; xx < outputWidth; xx++)
        {
          int64_t hstart = yy * dH - padH;
          int64_t wstart = xx * dW - padW;
          /* NOTE(review): same single-precision fminf/fmaxf clamping as in
           * the forward pass; see note there. */
          int64_t hend = fminf(hstart + kH, inputHeight + padH);
          int64_t wend = fminf(wstart + kW, inputWidth + padW);
          int pool_size = (hend - hstart) * (wend - wstart);
          hstart = fmaxf(hstart, 0);
          wstart = fmaxf(wstart, 0);
          hend = fminf(hend, inputHeight);
          wend = fminf(wend, inputWidth);

          real z = *ptr_gradOutput++;

          /* Must match the divisor chosen in updateOutput. */
          int divide_factor;
          if(count_include_pad)
            divide_factor = pool_size;
          else
            divide_factor = (hend - hstart) * (wend - wstart);

          int64_t kx, ky;
          for(ky = hstart ; ky < hend; ky++)
          {
            for(kx = wstart; kx < wend; kx++)
              ptr_gradInput[ky*inputWidth + kx] += z/divide_factor;
          }
        }
      }
    }
  }
  THTensor_(free)(gradOutput);
}

#endif
aux_interp.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_parcsr_ls.h"
#include "aux_interp.h"
#include "hypre_hopscotch_hash.h"

/*---------------------------------------------------------------------------
 * Auxilary routines for the long range interpolation methods.
 *  Implemented: "standard", "extended", "multipass", "FF"
 *--------------------------------------------------------------------------*/

/* AHB 11/06: Modification of the above original - takes two communication
   packages and inserts nodes to position expected for OUT_marker

   offd nodes from comm_pkg take up first chunk of CF_marker_offd, offd
   nodes from extend_comm_pkg take up the second chunk of CF_marker_offd. */

/* Exchange IN_marker values with neighbor processes via both comm packages.
 * OUT_marker receives comm_pkg's values first, then extend_comm_pkg's values
 * starting at the offset given by comm_pkg's receive counts.  The send buffer
 * is sized for the larger of the two packages and reused.  Returns
 * hypre_error_flag. */
HYPRE_Int hypre_alt_insert_new_nodes(hypre_ParCSRCommPkg *comm_pkg,
                          hypre_ParCSRCommPkg *extend_comm_pkg,
                          HYPRE_Int *IN_marker,
                          HYPRE_Int full_off_procNodes,
                          HYPRE_Int *OUT_marker)
{
   hypre_ParCSRCommHandle *comm_handle;

   HYPRE_Int i, index, shift;

   HYPRE_Int num_sends, num_recvs;

   HYPRE_Int *recv_vec_starts;

   HYPRE_Int e_num_sends;

   HYPRE_Int *int_buf_data;
   HYPRE_Int *e_out_marker;

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);

   e_num_sends = hypre_ParCSRCommPkgNumSends(extend_comm_pkg);

   /* one buffer, sized for the larger of the two send maps */
   index = hypre_max(hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                     hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, e_num_sends));

   int_buf_data = hypre_CTAlloc(HYPRE_Int, index, HYPRE_MEMORY_HOST);

   /* orig commpkg data*/
   index = 0;

   HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
   HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = begin; i < end; ++i)
   {
      int_buf_data[i - begin] =
         IN_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               OUT_marker);

   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;

   /* now do the extend commpkg */

   /* first we need to shift our position in the OUT_marker */
   shift = recv_vec_starts[num_recvs];
   e_out_marker = OUT_marker + shift;

   index = 0;

   begin = hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, 0);
   end = hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, e_num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = begin; i < end; ++i)
   {
      int_buf_data[i - begin] =
         IN_marker[hypre_ParCSRCommPkgSendMapElmt(extend_comm_pkg, i)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, extend_comm_pkg, int_buf_data,
                                               e_out_marker);

   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;

   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Same two-package exchange as hypre_alt_insert_new_nodes, but sends
 * HYPRE_BigInt values of (offset + IN_marker[..]) — used to communicate
 * global indices (comm type 21 instead of 11). */
HYPRE_Int hypre_big_insert_new_nodes(hypre_ParCSRCommPkg *comm_pkg,
                                     hypre_ParCSRCommPkg *extend_comm_pkg,
                                     HYPRE_Int *IN_marker,
                                     HYPRE_Int full_off_procNodes,
                                     HYPRE_BigInt offset,
                                     HYPRE_BigInt *OUT_marker)
{
   hypre_ParCSRCommHandle *comm_handle;

   HYPRE_Int i, index, shift;

   HYPRE_Int num_sends, num_recvs;

   HYPRE_Int *recv_vec_starts;

   HYPRE_Int e_num_sends;

   HYPRE_BigInt *int_buf_data;
   HYPRE_BigInt *e_out_marker;

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);

   e_num_sends = hypre_ParCSRCommPkgNumSends(extend_comm_pkg);

   index = hypre_max(hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                     hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, e_num_sends));

   int_buf_data = hypre_CTAlloc(HYPRE_BigInt, index, HYPRE_MEMORY_HOST);

   /* orig commpkg data*/
   index = 0;

   HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
   HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = begin; i < end; ++i)
   {
      int_buf_data[i - begin] = offset +
         (HYPRE_BigInt) IN_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, int_buf_data,
                                               OUT_marker);

   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;

   /* now do the extend commpkg */

   /* first we need to shift our position in the OUT_marker */
   shift = recv_vec_starts[num_recvs];
   e_out_marker = OUT_marker + shift;

   index = 0;

   begin = hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, 0);
   end = hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, e_num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = begin; i < end; ++i)
   {
      int_buf_data[i - begin] = offset +
         (HYPRE_BigInt) IN_marker[hypre_ParCSRCommPkgSendMapElmt(extend_comm_pkg, i)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 21, extend_comm_pkg, int_buf_data,
                                               e_out_marker);

   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;

   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* sort for non-ordered arrays */
/* Selection sort of data[0..n-1]; returns 1 if any swap occurred, else 0.
 * Works back-to-front: each pass places hypre_index_of_minimum's pick at
 * position i.  Because hypre_index_of_minimum actually returns the index of
 * the LARGEST element (see note there), the net result is ascending order. */
HYPRE_Int hypre_ssort(HYPRE_BigInt *data, HYPRE_Int n)
{
   HYPRE_Int i,si;
   HYPRE_Int change = 0;

   if(n > 0)
      for(i = n-1; i > 0; i--){
         si = hypre_index_of_minimum(data,i+1);
         if(i != si)
         {
            hypre_swap_int(data, i, si);
            change = 1;
         }
      }
   return change;
}

/* Auxilary function for hypre_ssort */
/* NOTE(review): despite the name, the comparison data[answer] < data[i]
 * selects the index of the MAXIMUM element of data[0..n-1].  hypre_ssort
 * relies on this to produce an ascending sort (it swaps the pick to the back),
 * so "fixing" the comparator would silently reverse the sort order — leave
 * as-is unless all callers are audited. */
HYPRE_Int hypre_index_of_minimum(HYPRE_BigInt *data, HYPRE_Int n)
{
   HYPRE_Int answer;
   HYPRE_Int i;

   answer = 0;
   for(i = 1; i < n; i++)
      if(data[answer] < data[i])
         answer = i;

   return answer;
}

/* Swap data[a] and data[b]. */
void hypre_swap_int(HYPRE_BigInt *data, HYPRE_Int a, HYPRE_Int b)
{
   HYPRE_BigInt temp;

   temp = data[a];
   data[a] = data[b];
   data[b] = temp;

   return;
}

/* Initialize CF_marker_offd, CF_marker, P_marker, P_marker_offd, tmp */
/* Fill the first diag_n entries of diag_ftc/diag_pm and the first offd_n
 * entries of offd_ftc/offd_pm/tmp_CF with -1.  The two branches only differ
 * in how the work is split so that the shared prefix is written once. */
void hypre_initialize_vecs(HYPRE_Int diag_n, HYPRE_Int offd_n,
                           HYPRE_Int *diag_ftc, HYPRE_BigInt *offd_ftc,
                           HYPRE_Int *diag_pm, HYPRE_Int *offd_pm,
                           HYPRE_Int *tmp_CF)
{
   HYPRE_Int i;

   /* Quicker initialization */
   if(offd_n < diag_n)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for(i = 0; i < offd_n; i++)
      {
         diag_ftc[i] = -1;
         offd_ftc[i] = -1;
         tmp_CF[i] = -1;
         if(diag_pm != NULL)
         {  diag_pm[i] = -1; }
         if(offd_pm != NULL)
         {  offd_pm[i] = -1;}
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for(i = offd_n; i < diag_n; i++)
      {
         diag_ftc[i] = -1;
         if(diag_pm != NULL)
         {  diag_pm[i] = -1; }
      }
   }
   else
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for(i = 0; i < diag_n; i++)
      {
         diag_ftc[i] = -1;
         offd_ftc[i] = -1;
         tmp_CF[i] = -1;
         if(diag_pm != NULL)
         {  diag_pm[i] = -1;}
         if(offd_pm != NULL)
         {  offd_pm[i] = -1;}
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for(i = diag_n; i < offd_n; i++)
      {
         offd_ftc[i] = -1;
         tmp_CF[i] = -1;
         if(offd_pm != NULL)
         {  offd_pm[i] = -1;}
      }
   }
   return;
}

/* Find nodes that are offd and are not contained in original offd
 * (neighbors of neighbors) */
/* Scans A_ext and Sop rows of fine points (CF_marker_offd < 0) for global
 * column indices outside [col_1, col_n) that are not already in col_map_offd.
 * Those new global indices are returned (sorted, deduplicated) in *found, and
 * column entries in A_ext_j/Sop_j are rewritten to negative local codes:
 * -(k+1) for an existing offd column k, or -(newpos+num_cols_A_offd+1) for a
 * newly found node.  Returns the number of new nodes.  Two implementations:
 * a concurrent hopscotch-hash path and a serial binary-search fallback. */
static HYPRE_Int hypre_new_offd_nodes(HYPRE_BigInt **found, HYPRE_Int num_cols_A_offd,
                                      HYPRE_Int *A_ext_i, HYPRE_BigInt *A_ext_j,
                                      HYPRE_Int num_cols_S_offd, HYPRE_BigInt *col_map_offd, HYPRE_BigInt col_1,
                                      HYPRE_BigInt col_n, HYPRE_Int *Sop_i, HYPRE_BigInt *Sop_j,
                                      HYPRE_Int *CF_marker_offd)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
#endif
   HYPRE_BigInt big_i1, big_k1;
   HYPRE_Int i, j, kk;
   HYPRE_Int got_loc, loc_col;

   /*HYPRE_Int min;*/
   HYPRE_Int newoff = 0;

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* inverse map: global offd column -> local index in col_map_offd */
   hypre_UnorderedBigIntMap col_map_offd_inverse;
   hypre_UnorderedBigIntMapCreate(&col_map_offd_inverse, 2*num_cols_A_offd, 16*hypre_NumThreads());

#pragma omp parallel for HYPRE_SMP_SCHEDULE
   for (i = 0; i < num_cols_A_offd; i++)
   {
      hypre_UnorderedBigIntMapPutIfAbsent(&col_map_offd_inverse, col_map_offd[i], i);
   }

   /* Find nodes that will be added to the off diag list */
   HYPRE_Int size_offP = A_ext_i[num_cols_A_offd];
   hypre_UnorderedBigIntSet set;
   hypre_UnorderedBigIntSetCreate(&set, size_offP, 16*hypre_NumThreads());

#pragma omp parallel private(i,j,big_i1)
   {
#pragma omp for HYPRE_SMP_SCHEDULE
      for (i = 0; i < num_cols_A_offd; i++)
      {
         if (CF_marker_offd[i] < 0)
         {
            for (j = A_ext_i[i]; j < A_ext_i[i+1]; j++)
            {
               big_i1 = A_ext_j[j];
               if(big_i1 < col_1 || big_i1 >= col_n)
               {
                  if (!hypre_UnorderedBigIntSetContains(&set, big_i1))
                  {
                     HYPRE_Int k = hypre_UnorderedBigIntMapGet(&col_map_offd_inverse, big_i1);
                     if (-1 == k)
                     {
                        /* genuinely new off-processor node */
                        hypre_UnorderedBigIntSetPut(&set, big_i1);
                     }
                     else
                     {
                        /* already-known offd column: encode local index */
                        A_ext_j[j] = -k - 1;
                     }
                  }
               }
            }
            for (j = Sop_i[i]; j < Sop_i[i+1]; j++)
            {
               big_i1 = Sop_j[j];
               if(big_i1 < col_1 || big_i1 >= col_n)
               {
                  if (!hypre_UnorderedBigIntSetContains(&set, big_i1))
                  {
                     HYPRE_Int k = hypre_UnorderedBigIntMapGet(&col_map_offd_inverse, big_i1);
                     if (-1 == k)
                     {
                        hypre_UnorderedBigIntSetPut(&set, big_i1);
                     }
                     else
                     {
                        Sop_j[j] = -k - 1;
                     }
                  }
               }
            }
         } /* CF_marker_offd[i] < 0 */
      } /* for each row */
   } /* omp parallel */

   hypre_UnorderedBigIntMapDestroy(&col_map_offd_inverse);
   HYPRE_BigInt *tmp_found = hypre_UnorderedBigIntSetCopyToArray(&set, &newoff);
   hypre_UnorderedBigIntSetDestroy(&set);

   /* Put found in monotone increasing order */
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif
   hypre_UnorderedBigIntMap tmp_found_inverse;
   if (newoff > 0)
   {
      hypre_big_sort_and_create_inverse_map(tmp_found, newoff, &tmp_found, &tmp_found_inverse);
   }
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif

   /* Set column indices for Sop and A_ext such that offd nodes are
    * negatively indexed */
#pragma omp parallel for private(kk,big_k1,got_loc,loc_col) HYPRE_SMP_SCHEDULE
   for(i = 0; i < num_cols_A_offd; i++)
   {
      if (CF_marker_offd[i] < 0)
      {
         for(kk = Sop_i[i]; kk < Sop_i[i+1]; kk++)
         {
            big_k1 = Sop_j[kk];
            if(big_k1 > -1 && (big_k1 < col_1 || big_k1 >= col_n))
            {
               got_loc = hypre_UnorderedBigIntMapGet(&tmp_found_inverse, big_k1);
               loc_col = got_loc + num_cols_A_offd;
               Sop_j[kk] = (HYPRE_BigInt)(-loc_col - 1);
            }
         }
         for (kk = A_ext_i[i]; kk < A_ext_i[i+1]; kk++)
         {
            big_k1 = A_ext_j[kk];
            if(big_k1 > -1 && (big_k1 < col_1 || big_k1 >= col_n))
            {
               got_loc = hypre_UnorderedBigIntMapGet(&tmp_found_inverse, big_k1);
               loc_col = got_loc + num_cols_A_offd;
               A_ext_j[kk] = (HYPRE_BigInt)(-loc_col - 1);
            }
         }
      }
   }
   if (newoff)
   {
      hypre_UnorderedBigIntMapDestroy(&tmp_found_inverse);
   }
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
   HYPRE_Int size_offP;

   HYPRE_BigInt *tmp_found;
   HYPRE_Int min;
   HYPRE_Int ifound;

   size_offP = A_ext_i[num_cols_A_offd]+Sop_i[num_cols_A_offd];
   tmp_found = hypre_CTAlloc(HYPRE_BigInt, size_offP, HYPRE_MEMORY_HOST);

   /* Find nodes that will be added to the off diag list */
   for (i = 0; i < num_cols_A_offd; i++)
   {
      if (CF_marker_offd[i] < 0)
      {
         for (j = A_ext_i[i]; j < A_ext_i[i+1]; j++)
         {
            big_i1 = A_ext_j[j];
            if(big_i1 < col_1 || big_i1 >= col_n)
            {
               ifound = hypre_BigBinarySearch(col_map_offd,big_i1,num_cols_A_offd);
               if(ifound == -1)
               {
                  tmp_found[newoff]=big_i1;
                  newoff++;
               }
               else
               {
                  A_ext_j[j] = (HYPRE_BigInt)(-ifound-1);
               }
            }
         }
         for (j = Sop_i[i]; j < Sop_i[i+1]; j++)
         {
            big_i1 = Sop_j[j];
            if(big_i1 < col_1 || big_i1 >= col_n)
            {
               ifound = hypre_BigBinarySearch(col_map_offd,big_i1,num_cols_A_offd);
               if(ifound == -1)
               {
                  tmp_found[newoff]=big_i1;
                  newoff++;
               }
               else
               {
                  Sop_j[j] = (HYPRE_BigInt)(-ifound-1);
               }
            }
         }
      }
   }
   /* Put found in monotone increasing order */
   if (newoff > 0)
   {
      hypre_BigQsort0(tmp_found,0,newoff-1);
      /* in-place deduplication of the sorted list */
      ifound = tmp_found[0];
      min = 1;
      for (i=1; i < newoff; i++)
      {
         if (tmp_found[i] > ifound)
         {
            ifound = tmp_found[i];
            tmp_found[min++] = ifound;
         }
      }
      newoff = min;
   }

   /* Set column indices for Sop and A_ext such that offd nodes are
    * negatively indexed */
   for(i = 0; i < num_cols_A_offd; i++)
   {
      if (CF_marker_offd[i] < 0)
      {
         for(kk = Sop_i[i]; kk < Sop_i[i+1]; kk++)
         {
            big_k1 = Sop_j[kk];
            if(big_k1 > -1 && (big_k1 < col_1 || big_k1 >= col_n))
            {
               got_loc = hypre_BigBinarySearch(tmp_found,big_k1,newoff);
               /* NOTE(review): if got_loc == -1 here, loc_col keeps its value
                * from a previous iteration (or is uninitialized on the first) —
                * presumably every big_k1 is guaranteed to be in tmp_found, but
                * that invariant is not checked here; verify upstream. */
               if(got_loc > -1)
                  loc_col = got_loc + num_cols_A_offd;
               Sop_j[kk] = (HYPRE_BigInt)(-loc_col - 1);
            }
         }
         for (kk = A_ext_i[i]; kk < A_ext_i[i+1]; kk++)
         {
            big_k1 = A_ext_j[kk];
            if(big_k1 > -1 && (big_k1 < col_1 || big_k1 >= col_n))
            {
               got_loc = hypre_BigBinarySearch(tmp_found,big_k1,newoff);
               loc_col = got_loc + num_cols_A_offd;
               A_ext_j[kk] = (HYPRE_BigInt)(-loc_col - 1);
            }
         }
      }
   }
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */

   *found = tmp_found;

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
#endif

   return newoff;
}

/* Single-package marker exchange: gather IN_marker values along comm_pkg's
 * send map and deliver them into OUT_marker on the receiving side. */
HYPRE_Int hypre_exchange_marker(hypre_ParCSRCommPkg *comm_pkg,
                                HYPRE_Int *IN_marker,
                                HYPRE_Int *OUT_marker)
{
   HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
   HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
   HYPRE_Int *int_buf_data = hypre_CTAlloc(HYPRE_Int, end, HYPRE_MEMORY_HOST);

   HYPRE_Int i;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = begin; i < end; ++i)
   {
      int_buf_data[i - begin] =
         IN_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)];
   }

   hypre_ParCSRCommHandle *comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                                       OUT_marker);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Gather all off-processor data needed for long-range interpolation:
 * CF markers, external rows of A and S (A_ext/Sop), neighbor-of-neighbor
 * nodes, an extended comm package for them, and (for systems) dof_func.
 * Outputs are returned through the pointer arguments; *full_off_procNodes
 * is the total count of original + newly discovered offd nodes. */
HYPRE_Int hypre_exchange_interp_data(
   HYPRE_Int **CF_marker_offd,
   HYPRE_Int **dof_func_offd,
   hypre_CSRMatrix **A_ext,
   HYPRE_Int *full_off_procNodes,
   hypre_CSRMatrix **Sop,
   hypre_ParCSRCommPkg **extend_comm_pkg,
   hypre_ParCSRMatrix *A,
   HYPRE_Int *CF_marker,
   hypre_ParCSRMatrix *S,
   HYPRE_Int num_functions,
   HYPRE_Int *dof_func,
   HYPRE_Int skip_fine_or_same_sign) // skip_fine_or_same_sign if we want to skip fine points in S and nnz with the same sign as diagonal in A
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] -= hypre_MPI_Wtime();
#endif

   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
   HYPRE_BigInt *found = NULL;

   /*----------------------------------------------------------------------
    * Get the off processors rows for A and S, associated with columns in
    * A_offd and S_offd.
    *---------------------------------------------------------------------*/
   *CF_marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   hypre_exchange_marker(comm_pkg, CF_marker, *CF_marker_offd);

   hypre_ParCSRCommHandle *comm_handle_a_idx, *comm_handle_a_data;
   *A_ext = hypre_ParCSRMatrixExtractBExt_Overlap(A,A,1,&comm_handle_a_idx,&comm_handle_a_data,CF_marker,*CF_marker_offd,skip_fine_or_same_sign,skip_fine_or_same_sign);
   HYPRE_Int *A_ext_i = hypre_CSRMatrixI(*A_ext);
   HYPRE_BigInt *A_ext_j = hypre_CSRMatrixBigJ(*A_ext);
   HYPRE_Int A_ext_rows = hypre_CSRMatrixNumRows(*A_ext);

   hypre_ParCSRCommHandle *comm_handle_s_idx;
   *Sop = hypre_ParCSRMatrixExtractBExt_Overlap(S,A,0,&comm_handle_s_idx,NULL,CF_marker,*CF_marker_offd,skip_fine_or_same_sign,0);
   HYPRE_Int *Sop_i = hypre_CSRMatrixI(*Sop);
   HYPRE_BigInt *Sop_j = hypre_CSRMatrixBigJ(*Sop);
   HYPRE_Int Soprows = hypre_CSRMatrixNumRows(*Sop);

   /* finish the overlapped index exchanges and free their send buffers */
   HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_s_idx->send_data;
   hypre_ParCSRCommHandleDestroy(comm_handle_s_idx);
   hypre_TFree(send_idx, HYPRE_MEMORY_HOST);

   send_idx = (HYPRE_Int *)comm_handle_a_idx->send_data;
   hypre_ParCSRCommHandleDestroy(comm_handle_a_idx);
   hypre_TFree(send_idx, HYPRE_MEMORY_HOST);

   /* Find nodes that are neighbors of neighbors, not found in offd */
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] += hypre_MPI_Wtime();
#endif
   HYPRE_Int newoff = hypre_new_offd_nodes(&found, A_ext_rows, A_ext_i, A_ext_j,
                                           Soprows, col_map_offd, col_1, col_n,
                                           Sop_i, Sop_j, *CF_marker_offd);
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] -= hypre_MPI_Wtime();
#endif
   if(newoff >= 0)
      *full_off_procNodes = newoff + num_cols_A_offd;
   else
   {
      return hypre_error_flag;
   }

   /* Possibly add new points and new processors to the comm_pkg, all
    * processors need new_comm_pkg */

   /* AHB - create a new comm package just for extended info -
      this will work better with the assumed partition*/
   hypre_ParCSRFindExtendCommPkg(hypre_ParCSRMatrixComm(A),
                                 hypre_ParCSRMatrixGlobalNumCols(A),
                                 hypre_ParCSRMatrixFirstColDiag(A),
                                 hypre_CSRMatrixNumCols(A_diag),
                                 hypre_ParCSRMatrixColStarts(A),
                                 hypre_ParCSRMatrixAssumedPartition(A),
                                 newoff,
                                 found,
                                 extend_comm_pkg);

   /* grow CF_marker_offd and fill the new tail via the extended package */
   *CF_marker_offd = hypre_TReAlloc(*CF_marker_offd, HYPRE_Int, *full_off_procNodes, HYPRE_MEMORY_HOST);
   hypre_exchange_marker(*extend_comm_pkg, CF_marker, *CF_marker_offd + A_ext_rows);

   if(num_functions > 1)
   {
      if (*full_off_procNodes > 0)
         *dof_func_offd = hypre_CTAlloc(HYPRE_Int, *full_off_procNodes, HYPRE_MEMORY_HOST);

      hypre_alt_insert_new_nodes(comm_pkg, *extend_comm_pkg, dof_func,
                                 *full_off_procNodes, *dof_func_offd);
   }

   hypre_TFree(found, HYPRE_MEMORY_HOST);

   /* finish the overlapped A-data exchange and free its send buffer */
   HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_a_data->send_data;
   hypre_ParCSRCommHandleDestroy(comm_handle_a_data);
   hypre_TFree(send_data, HYPRE_MEMORY_HOST);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] += hypre_MPI_Wtime();
#endif

   return hypre_error_flag;
}

/* Compress P's offd part: keep only columns that actually appear in P_offd_j
 * and are marked in tmp_CF_marker_offd, renumber P_offd_j to the compressed
 * local indices, and install the sorted global column map into P. */
void hypre_build_interp_colmap(hypre_ParCSRMatrix *P, HYPRE_Int full_off_procNodes,
                               HYPRE_Int *tmp_CF_marker_offd, HYPRE_BigInt *fine_to_coarse_offd)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
#endif
   HYPRE_Int i, index;

   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(P->diag);

   HYPRE_Int P_offd_size = P->offd->i[n_fine];
   HYPRE_Int *P_offd_j = P->offd->j;
   HYPRE_BigInt *col_map_offd_P = NULL;
   HYPRE_Int *P_marker = NULL;

   if (full_off_procNodes)
      P_marker = hypre_TAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i=0; i < full_off_procNodes; i++)
      P_marker[i] = 0;

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* These two loops set P_marker[i] to 1 if it appears in P_offd_j and if
    * tmp_CF_marker_offd has i marked. num_cols_P_offd is then set to the
    * total number of times P_marker is set */
#pragma omp parallel for private(i,index) HYPRE_SMP_SCHEDULE
   for (i=0; i < P_offd_size; i++)
   {
      index = P_offd_j[i];
      if(tmp_CF_marker_offd[index] >= 0)
      {
         P_marker[index] = 1;
      }
   }

   HYPRE_Int prefix_sum_workspace[hypre_NumThreads() + 1];
   HYPRE_Int num_cols_P_offd = 0;
#pragma omp parallel private(i)
   {
      HYPRE_Int i_begin, i_end;
      hypre_GetSimpleThreadPartition(&i_begin, &i_end, full_off_procNodes);

      HYPRE_Int local_num_cols_P_offd = 0;
      for (i = i_begin; i < i_end; i++)
      {
         if (P_marker[i] == 1) local_num_cols_P_offd++;
      }

      /* turn per-thread counts into per-thread write offsets */
      hypre_prefix_sum(&local_num_cols_P_offd, &num_cols_P_offd, prefix_sum_workspace);

#pragma omp master
      {
         if (num_cols_P_offd)
            col_map_offd_P = hypre_TAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      }
#pragma omp barrier

      for (i = i_begin; i < i_end; i++)
      {
         if (P_marker[i] == 1)
         {
            col_map_offd_P[local_num_cols_P_offd++] = fine_to_coarse_offd[i];
         }
      }
   }

   hypre_UnorderedBigIntMap col_map_offd_P_inverse;
   hypre_big_sort_and_create_inverse_map(col_map_offd_P, num_cols_P_offd, &col_map_offd_P, &col_map_offd_P_inverse);

   // find old idx -> new idx map
#pragma omp parallel for
   for (i = 0; i < full_off_procNodes; i++)
      P_marker[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_P_inverse, fine_to_coarse_offd[i]);

   if (num_cols_P_offd)
   {
      hypre_UnorderedBigIntMapDestroy(&col_map_offd_P_inverse);
   }
#pragma omp parallel for
   for(i = 0; i < P_offd_size; i++)
      P_offd_j[i] = P_marker[P_offd_j[i]];

#else /* HYPRE_CONCURRENT_HOPSCOTCH */
   HYPRE_Int num_cols_P_offd = 0;
   HYPRE_Int j;
   for (i=0; i < P_offd_size; i++)
   {
      index = P_offd_j[i];
      if (!P_marker[index])
      {
         if(tmp_CF_marker_offd[index] >= 0)
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }
   }

   if (num_cols_P_offd)
   {
      HYPRE_Int *tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
      HYPRE_BigInt *tmp_marker = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);

      /* collect the marked old indices in increasing order */
      index = 0;
      for(i = 0; i < num_cols_P_offd; i++)
      {
         while( P_marker[index] == 0) index++;
         tmp_map_offd[i] = index++;
      }
      /* renumber column entries to their compressed positions */
      for(i = 0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);

      index = 0;
      for(i = 0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index] == 0) index++;
         col_map_offd_P[i] = fine_to_coarse_offd[index];
         index++;
      }

      /* Sort the col_map_offd_P and P_offd_j correctly */
      for(i = 0; i < num_cols_P_offd; i++)
         tmp_marker[i] = col_map_offd_P[i];

      /* Check if sort actually changed anything */
      if(hypre_ssort(col_map_offd_P,num_cols_P_offd))
      {
         for(i = 0; i < P_offd_size; i++)
            for(j = 0; j < num_cols_P_offd; j++)
               if(tmp_marker[P_offd_j[i]] == col_map_offd_P[j])
               {
                  P_offd_j[i] = j;
                  j = num_cols_P_offd;
               }
      }

      hypre_TFree(tmp_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   }
#endif /* HYPRE_CONCURRENT_HOPSCOTCH */

   hypre_TFree(P_marker, HYPRE_MEMORY_HOST);

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P->offd) = num_cols_P_offd;
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
#endif
}
GB_unaryop__identity_uint64_int8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_uint64_int8
// op(A') function:  GB_tran__identity_uint64_int8

// C type:   uint64_t
// A type:   int8_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise: Cx [p] = (uint64_t) Ax [p] for all anz entries, parallelized
// with a static OpenMP schedule over nthreads.
GrB_Info GB_unop__identity_uint64_int8
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    int8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel itself lives in GB_unaryop_transpose.c; this wrapper
// supplies the type/operator macros defined above.
GrB_Info GB_tran__identity_uint64_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
basic_testing.h
// ========================================================================== // SeqAn - The Library for Sequence Analysis // ========================================================================== // Copyright (c) 2006-2010, Knut Reinert, FU Berlin // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of Knut Reinert or the FU Berlin nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
// // ========================================================================== // Author: Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de> // ========================================================================== // The SeqAn testing infrastructure. Based on ideas from the OpenMS // "ClassTest.h". // ========================================================================== // SEQAN_NO_GENERATED_FORWARDS #ifndef SEQAN_BASIC_BASIC_TESTING_H_ #define SEQAN_BASIC_BASIC_TESTING_H_ #include <iostream> // stdout, stderr #include <cstring> // strrpos #include <cstdlib> // exit() #include <cstdarg> // va_start, va_list, va_end #include <set> #include <vector> #include <string> #ifdef PLATFORM_WINDOWS #include <Windows.h> // DeleteFile() #else // #ifdef PLATFORM_WINDOWS #include <unistd.h> // unlink() #endif // #ifdef PLATFORM_WINDOWS // SeqAn's has three global debug/testing levels: testing, debug and // release. Depending on the level, the SEQAN_ASSERT_* and // SEQAN_CHECKPOINT macros will be enabled. // // Note that this is independent of the <cassert> assertions and // NDEBUG being defined. // // The levels are enabled by the values of the macros // SEQAN_ENABLE_TESTING and SEQAN_ENABLE_DEBUG. By setting a macro to // 0, one disables the level and by setting the macro to 1, one // enables a level. Enabling testing also enables debug, overriding a // value of 0 for SEQAN_ENABLE_DEBUG. // // If the level is release (both the macros for debug and testing are // 0), the assertions will be disabled. If the level is debug then // the assertions will be enabled. If the level is testing then the // checkpoint macros will also be enabled. // // The default is to enable debugging but disable testing. // // You can print the current level using the function seqan::printDebugLevel(). // Set default for SEQAN_ENABLE_TESTING. #ifndef SEQAN_ENABLE_TESTING #define SEQAN_ENABLE_TESTING 0 #endif // #ifndef SEQAN_ENABLE_TESTING // Set default for SEQAN_ENABLE_DEBUG. 
#ifndef SEQAN_ENABLE_DEBUG #define SEQAN_ENABLE_DEBUG 1 #endif // #ifndef SEQAN_ENABLE_DEBUG // Force-enable debugging if testing is enabled. #if SEQAN_ENABLE_TESTING #undef SEQAN_ENABLE_DEBUG #define SEQAN_ENABLE_DEBUG 1 #endif // #if SEQAN_ENABLE_TESTING // Allow disabling checkpoints independent of testing. #ifndef SEQAN_ENABLE_CHECKPOINTS #define SEQAN_ENABLE_CHECKPOINTS SEQAN_ENABLE_TESTING #endif // #ifndef SEQAN_ENABLE_CHECKPOINTS namespace seqan { // SEQAN_CXX_FLAGS_ contains the compiler flags, SEQAN_CXX_FLAGS is a string // literal with this value. #if !defined(SEQAN_CXX_FLAGS_) #define SEQAN_CXX_FLAGS_ SEQAN_CXX_FLAGS_NOT_SET #endif // !defined(SEQAN_CXX_FLAGS__) #define SEQAN_MKSTRING_(str) # str #define SEQAN_MKSTRING(str) SEQAN_MKSTRING_(str) #define SEQAN_CXX_FLAGS SEQAN_MKSTRING(SEQAN_CXX_FLAGS_) //#undef SEQAN_MKSTRING //#undef SEQAN_MKSTRING_ /** .Function.printDebugLevel: ..cat:Miscellaneous: ..summary:Print the current SeqAn debug level and the compiler flags to the given stream. ..signature:printDebugLevel(stream) ..param.stream:The stream to print to, e.g. $std::cout$. ..include:seqan/basic.h */ template <typename TStream> void printDebugLevel(TStream &stream) { stream << "SEQAN_ENABLE_DEBUG == " << SEQAN_ENABLE_DEBUG << std::endl; stream << "SEQAN_ENABLE_TESTING == " << SEQAN_ENABLE_TESTING << std::endl; stream << "SEQAN_ENABLE_CHECKPOINTS == " << SEQAN_ENABLE_CHECKPOINTS << std::endl; stream << "SEQAN_CXX_FLAGS == \"" << SEQAN_CXX_FLAGS << "\"" << std::endl; } // Namespace for the testing infrastructure. // // This namespace contains the variables and functions that are used // in the macros below to perform the tests. namespace ClassTest { // Raised when an assertion fails in test mode. struct AssertionFailedException {}; // Container for static global data for the tests. struct StaticData { // Number of tests that were run. static int &testCount() { static int result = 0; return result; } // Number of errors that occured. 
static int &errorCount() { static int result = 0; return result; } // Number of skipped tests. static int &skippedCount() { static int result = 0; return result; } // Flag whether there was an error in this test. static bool &thisTestOk() { static bool result = 0; return result; } // Flag whether this test was skipped. static bool &thisTestSkipped() { static bool result = 0; return result; } // Name of the current test. static const char *&currentTestName() { const char *defaultValue = ""; static const char *result = const_cast<char*>(defaultValue); return result; } // Base path to the binary. Extrapolated from __FILE__. static char *&basePath() { const char *defaultValue = "."; static char *result = const_cast<char*>(defaultValue); return result; } // Base path to the "projects" directory, extrapolated from // __FILE__. static char *&pathToProjects() { const char *defaultValue = "."; static char *result = const_cast<char*>(defaultValue); return result; } // Total number of checkpoints in header file. static int &totalCheckPointCount() { static int result = 0; return result; } // Total number of checkpoints found in binary files. static int &foundCheckPointCount() { static int result = 0; return result; } // Names of temporary files as returned by tempFileName. This // global state is used to remove any existing such files // after completing the testsuite. static ::std::vector<std::string> & tempFileNames() { static ::std::vector<std::string> filenames; return filenames; } }; // Open a temporary file, unlink it, return posix handle. Note: This has not been tested yet. // TODO(holtgrew): Not used yet and Windows code does not work. /* inline int openTempFile() { #ifdef PLATFORM_WINDOWS char * fileName = _tempnam(NULL, "SQN"); if (!fileName) { ::std::cerr << "Cannot create a unique temporary filename" << ::std::endl; exit(1); } int result = open(fileName, _O_RDWR | OPEN_TEMPORARY); free(fileName); return result; #else // A Unix... 
char filenameBuffer[100]; strcpy(filenameBuffer, "/tmp/SEQANXXXXXXXXXX"); int result = mkstemp(filenameBuffer); unlink(filenameBuffer); return result; #endif // ifdef PLATFORM_WINDOWS } */ // Return the path to a temporary file, in a static buffer in this // function. This is not thread safe! inline const char *tempFileName() { static char fileNameBuffer[100]; #ifdef PLATFORM_WINDOWS_VS char * fileName = tempnam(NULL, "SEQAN."); if (!fileName) { ::std::cerr << "Cannot create a unique temporary filename" << ::std::endl; exit(1); } strcpy(fileNameBuffer, fileName); free(fileName); StaticData::tempFileNames().push_back(fileNameBuffer); return fileNameBuffer; #else // ifdef PLATFORM_WINDOWS_VS strcpy(fileNameBuffer, "/tmp/SEQAN.XXXXXXXXXXXXXXXXXXXX"); #ifdef PLATFORM_WINDOWS_MINGW // There is no mkstemp in MinGW but it does not complain about tmpnam. tmpnam(fileNameBuffer); #else // ifdef PLATFORM_WINDOWS_MINGW mkstemp(fileNameBuffer); unlink(fileNameBuffer); #endif // #ifdef PLATFORM_WINDOWS_MINGW StaticData::tempFileNames().push_back(fileNameBuffer); return fileNameBuffer; #endif // ifdef PLATFORM_WINDOWS_VS } // Initialize the testing infrastructure. // // Used through SEQAN_BEGIN_TESTSUITE(test_name) inline void beginTestSuite(const char *testSuiteName, const char *argv0) { // First things first: Print the current debug level. printDebugLevel(std::cout); (void)testSuiteName; StaticData::testCount() = 0; StaticData::skippedCount() = 0; StaticData::errorCount() = 0; StaticData::totalCheckPointCount() = 0; StaticData::foundCheckPointCount() = 0; // Get path to argv0. 
const char *end = argv0; #ifdef PLATFORM_WINDOWS const char pathSeparator = '\\'; #else // PLATFORM_WINDOWS const char pathSeparator = '/'; #endif // PLATFORM_WINDOWS for (const char *ptr = strchr(argv0, pathSeparator); ptr != 0; ptr = strchr(ptr+1, pathSeparator)) end = ptr; int rpos = end - argv0; if (rpos <= 0) { StaticData::basePath() = new char[1]; strcpy(StaticData::basePath(), "."); } else { int len = rpos; StaticData::basePath() = new char[len]; strncpy(StaticData::basePath(), argv0, len); } // Get path to projects. const char *file = __FILE__; int pos = -1; for (size_t i = 0; i < strlen(file) - strlen("projects"); ++i) { if (strncmp(file + i, "projects", strlen("projects")) == 0) { pos = i; } } if (pos == -1) { std::cerr << "Could not extrapolate path to projects from __FILE__ == \"" << __FILE__ << "\"" << std::endl; exit(1); } StaticData::pathToProjects() = new char[pos]; strncpy(StaticData::pathToProjects(), file, pos); StaticData::pathToProjects()[pos-1] = '\0'; } // Run test suite finalization. // // Used through SEQAN_END_TESTSUITE // // Prints a bottom banner with the error count and returns the // program's return code. 
// Run test suite finalization.
//
// Used through SEQAN_END_TESTSUITE
//
// Deletes any registered temporary files, prints a bottom banner with
// the error count, and returns the program's return code (0 on
// success, 1 if any test failed).
inline int endTestSuite() {
    delete[] StaticData::basePath();
    delete[] StaticData::pathToProjects();

    std::cout << "**************************************" << std::endl;
    std::cout << " Total Check Points : " << StaticData::totalCheckPointCount() << std::endl;
    std::cout << " Found Check Points : " << StaticData::foundCheckPointCount() << std::endl;
    std::cout << " Lost Check Points : " << StaticData::totalCheckPointCount() - StaticData::foundCheckPointCount() << std::endl;
    std::cout << "--------------------------------------" << std::endl;
    std::cout << " Total Tests: " << StaticData::testCount() << std::endl;
    std::cout << " Skipped: " << StaticData::skippedCount() << std::endl;
    std::cout << " Errors: " << StaticData::errorCount() << std::endl;
    std::cout << "**************************************" << std::endl;
    // TODO(holtgrew): Re-enable that all check points have to be found for the test to return 1;
    /*
    if (StaticData::totalCheckPointCount() != StaticData::foundCheckPointCount())
        return 1;
    */
    // Delete all temporary files that still exist.  FIX: this must run
    // before the error-count check below -- the old code returned early
    // on failure and leaked the temporary files exactly when a suite
    // failed.
    for (unsigned i = 0; i < StaticData::tempFileNames().size(); ++i) {
#ifdef PLATFORM_WINDOWS
        DeleteFile(StaticData::tempFileNames()[i].c_str());
#else  // #ifdef PLATFORM_WINDOWS
        unlink(StaticData::tempFileNames()[i].c_str());
#endif  // #ifdef PLATFORM_WINDOWS
    }
    if (StaticData::errorCount() != 0)
        return 1;
    return 0;
}

// Run test initialization.
//
// Records the test's name and resets the per-test ok/skipped flags.
inline void beginTest(const char *testName) {
    StaticData::currentTestName() = testName;
    StaticData::thisTestOk() = true;
    StaticData::thisTestSkipped() = false;
    StaticData::testCount() += 1;
}

// Run test finalization.
//
// Prints one line with the test's name and outcome (SKIPPED/OK/FAILED).
inline void endTest() {
    if (StaticData::thisTestSkipped()) {
        std::cout << StaticData::currentTestName() << " SKIPPED" << std::endl;
    } else if (StaticData::thisTestOk()) {
        std::cout << StaticData::currentTestName() << " OK" << std::endl;
    } else {
        std::cerr << StaticData::currentTestName() << " FAILED" << std::endl;
    }
}

// Marks the current test as "skipped".
// Marks the current test as skipped and bumps the skip counter.
inline void skipCurrentTest() {
    StaticData::thisTestSkipped() = true;
    StaticData::skippedCount() += 1;
}

// Called by the macro SEQAN_ASSERT_FAIL.
//
// Unconditionally records a failure; comment is an optional
// printf-style format string consuming the trailing arguments.
inline void forceFail(const char *file, int line,
                      const char *comment, ...) {
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " FAILED! ";
    if (comment) {
        std::cerr << " (";
        va_list args;
        va_start(args, comment);
        vfprintf(stderr, comment, args);
        va_end(args);
        std::cerr << ")";
    }
    std::cerr << std::endl;
}

// Similar to forceFail above, but accepting a va_list parameter.
inline void vforceFail(const char *file, int line,
                       const char *comment, va_list argp) {
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " FAILED! ";
    if (comment) {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
}

// Same as forceFail above, but with comment set to 0.
inline void forceFail(const char *file, int line) {
    forceFail(file, line, 0);
}

// Called by the macro SEQAN_ASSERT_EQ.
//
// Tests that the given two value are equal.  Returns true iff the
// two values are equal.
template <typename T1, typename T2>
bool testEqual(const char *file, int line,
               const T1 &value1, const char *expression1,
               const T2 &value2, const char *expression2,
               const char *comment, ...) {
    if (!(value1 == value2)) {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " == " << expression2 << " was: " << value1
                  << " != " << value2;
        if (comment) {
            std::cerr << " (";
            va_list args;
            va_start(args, comment);
            vfprintf(stderr, comment, args);
            va_end(args);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Similar to testEqual above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestEqual(const char *file, int line,
                const T1 &value1, const char *expression1,
                const T2 &value2, const char *expression2,
                const char *comment, va_list argp) {
    if (!(value1 == value2)) {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " == " << expression2 << " was: " << value1
                  << " != " << value2;
        if (comment) {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Same as testEqual above, but with comment set to 0.
template <typename T1, typename T2>
bool testEqual(const char *file, int line,
               const T1 &value1, const char *expression1,
               const T2 &value2, const char *expression2) {
    return testEqual(file, line, value1, expression1, value2, expression2, 0);
}

// Called by the macro SEQAN_ASSERT_IN_DELTA.
//
// Tests that value1 lies within +/- value3 (the delta) of value2.
// Returns true iff the test succeeds.
template <typename T1, typename T2, typename T3>
bool testInDelta(const char *file, int line,
                 const T1 &value1, const char *expression1,
                 const T2 &value2, const char *expression2,
                 const T3 &value3, const char *expression3,
                 const char *comment, ...) {
    if (!(value1 >= value2 - value3 && value1 <= value2 + value3)) {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " in [" << expression2 << " - " << expression3
                  << ", " << expression2 << " + " << expression3 << "] was: " << value1
                  << " not in [" << value2 - value3 << ", " << value2 + value3 << "]";
        if (comment) {
            std::cerr << " (";
            va_list args;
            va_start(args, comment);
            vfprintf(stderr, comment, args);
            va_end(args);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Similar to testInDelta above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2, typename T3>
bool vtestInDelta(const char *file, int line,
                  const T1 &value1, const char *expression1,
                  const T2 &value2, const char *expression2,
                  const T3 &value3, const char *expression3,
                  const char *comment, va_list argp) {
    if (!(value1 >= value2 - value3 && value1 <= value2 + value3)) {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " in [" << expression2 << " - " << expression3
                  << ", " << expression2 << " + " << expression3 << "] was: " << value1
                  << " not in [" << value2 - value3 << ", " << value2 + value3 << "]";
        if (comment) {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Same as testInDelta above, but with comment set to 0.
template <typename T1, typename T2, typename T3>
bool testInDelta(const char *file, int line,
                 const T1 &value1, const char *expression1,
                 const T2 &value2, const char *expression2,
                 const T3 &value3, const char *expression3) {
    return testInDelta(file, line, value1, expression1, value2, expression2, value3, expression3, 0);
}

// Called by the macro SEQAN_ASSERT_NEQ.
//
// Tests that the given two values are not equal.  Returns true iff
// the two values differ.
template <typename T1, typename T2>
bool testNotEqual(const char *file, int line,
                  const T1 &value1, const char *expression1,
                  const T2 &value2, const char *expression2,
                  const char *comment, ...) {
    if (!(value1 != value2)) {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " != " << expression2 << " was: " << value1
                  << " == " << value2;
        if (comment) {
            std::cerr << " (";
            va_list args;
            va_start(args, comment);
            vfprintf(stderr, comment, args);
            va_end(args);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Similar to testNotEqual above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestNotEqual(const char *file, int line,
                   const T1 &value1, const char *expression1,
                   const T2 &value2, const char *expression2,
                   const char *comment, va_list argp) {
    if (!(value1 != value2)) {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " != " << expression2 << " was: " << value1
                  << " == " << value2;
        if (comment) {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Same as testNotEqual above, but with comment set to 0.
template <typename T1, typename T2>
bool testNotEqual(const char *file, int line,
                  const T1 &value1, const char *expression1,
                  const T2 &value2, const char *expression2) {
    return testNotEqual(file, line, value1, expression1, value2, expression2, 0);
}

// Called by the macro SEQAN_ASSERT_GEQ.
//
// Tests that the first value is greater than or equal to the
// second one.  Returns true iff the test yields true.
template <typename T1, typename T2>
bool testGeq(const char *file, int line,
             const T1 &value1, const char *expression1,
             const T2 &value2, const char *expression2,
             const char *comment, ...) {
    if (!(value1 >= value2)) {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " >= " << expression2 << " was: " << value1
                  << " < " << value2;
        if (comment) {
            std::cerr << " (";
            va_list args;
            va_start(args, comment);
            vfprintf(stderr, comment, args);
            va_end(args);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Similar to testGeq above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestGeq(const char *file, int line,
              const T1 &value1, const char *expression1,
              const T2 &value2, const char *expression2,
              const char *comment, va_list argp) {
    if (!(value1 >= value2)) {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " >= " << expression2 << " was: " << value1
                  << " < " << value2;
        if (comment) {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Same as testGeq above, but with comment set to 0.
template <typename T1, typename T2>
bool testGeq(const char *file, int line,
             const T1 &value1, const char *expression1,
             const T2 &value2, const char *expression2) {
    return testGeq(file, line, value1, expression1, value2, expression2, 0);
}

// Called by the macro SEQAN_ASSERT_GT.
//
// Tests that the first value is greater than the second one.
// Returns true iff the test yields true.
template <typename T1, typename T2>
bool testGt(const char *file, int line,
            const T1 &value1, const char *expression1,
            const T2 &value2, const char *expression2,
            const char *comment, ...) {
    if (!(value1 > value2)) {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " > " << expression2 << " was: " << value1
                  << " <= " << value2;
        if (comment) {
            std::cerr << " (";
            va_list args;
            va_start(args, comment);
            vfprintf(stderr, comment, args);
            va_end(args);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Similar to testGt above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestGt(const char *file, int line,
             const T1 &value1, const char *expression1,
             const T2 &value2, const char *expression2,
             const char *comment, va_list argp) {
    if (!(value1 > value2)) {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " > " << expression2 << " was: " << value1
                  << " <= " << value2;
        if (comment) {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Same as testGt above, but with comment set to 0.
template <typename T1, typename T2>
bool testGt(const char *file, int line,
            const T1 &value1, const char *expression1,
            const T2 &value2, const char *expression2) {
    return testGt(file, line, value1, expression1, value2, expression2, 0);
}

// Called by the macro SEQAN_ASSERT_LEQ.
//
// Tests that the first value is less than or equal to the second
// one.  Returns true iff the test yields true.
template <typename T1, typename T2>
bool testLeq(const char *file, int line,
             const T1 &value1, const char *expression1,
             const T2 &value2, const char *expression2,
             const char *comment, ...) {
    if (!(value1 <= value2)) {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " <= " << expression2 << " was: " << value1
                  << " > " << value2;
        if (comment) {
            std::cerr << " (";
            va_list args;
            va_start(args, comment);
            vfprintf(stderr, comment, args);
            va_end(args);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Similar to testLeq above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestLeq(const char *file, int line,
              const T1 &value1, const char *expression1,
              const T2 &value2, const char *expression2,
              const char *comment, va_list argp) {
    if (!(value1 <= value2)) {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " <= " << expression2 << " was: " << value1
                  << " > " << value2;
        if (comment) {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Same as testLeq above, but with comment set to 0.
template <typename T1, typename T2>
bool testLeq(const char *file, int line,
             const T1 &value1, const char *expression1,
             const T2 &value2, const char *expression2) {
    return testLeq(file, line, value1, expression1, value2, expression2, 0);
}

// Called by the macro SEQAN_ASSERT_LT.
//
// Tests that the first value is less than the second one.
// Returns true iff the test yields true.
template <typename T1, typename T2>
bool testLt(const char *file, int line,
            const T1 &value1, const char *expression1,
            const T2 &value2, const char *expression2,
            const char *comment, ...) {
    if (!(value1 < value2)) {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " < " << expression2 << " was: " << value1
                  << " >= " << value2;
        if (comment) {
            std::cerr << " (";
            va_list args;
            va_start(args, comment);
            vfprintf(stderr, comment, args);
            va_end(args);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Similar to testLt above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestLt(const char *file, int line,
             const T1 &value1, const char *expression1,
             const T2 &value2, const char *expression2,
             const char *comment, va_list argp) {
    if (!(value1 < value2)) {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " < " << expression2 << " was: " << value1
                  << " >= " << value2;
        if (comment) {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Same as testLt above, but comment is 0.
template <typename T1, typename T2>
bool testLt(const char *file, int line,
            const T1 &value1, const char *expression1,
            const T2 &value2, const char *expression2) {
    return testLt(file, line, value1, expression1, value2, expression2, 0);
}

// Called by the macro SEQAN_ASSERT.
//
// Test that the given argument evaluates to true.
template <typename T>
bool testTrue(const char *file, int line,
              const T &value_, const char *expression_,
              const char *comment, ...)
{
    if (!(value_)) {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression_ << " should be true but was " << (value_);
        if (comment) {
            std::cerr << " (";
            va_list args;
            va_start(args, comment);
            vfprintf(stderr, comment, args);
            va_end(args);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Similar to testTrue above, but accepts a va_list instead of variadic
// parameters.
template <typename T>
bool vtestTrue(const char *file, int line,
               const T &value_, const char *expression_,
               const char *comment, va_list argp) {
    if (!(value_)) {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression_ << " should be true but was " << (value_);
        if (comment) {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Same as testTrue above, but comment will automatically be set to 0.
template <typename T>
bool testTrue(const char *file, int line,
              const T &value_, const char *expression_) {
    return testTrue(file, line, value_, expression_, 0);
}

// Counterpart to testTrue, called by the negated assertion macro.
//
// Test that the given argument evaluates to false.
template <typename T>
bool testFalse(const char *file, int line,
               const T &value_, const char *expression_,
               const char *comment, ...) {
    if (value_) {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression_ << " should be false but was " << (value_);
        if (comment) {
            std::cerr << " (";
            va_list args;
            va_start(args, comment);
            vfprintf(stderr, comment, args);
            va_end(args);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Similar to testFalse above, but accepts a va_list instead of variadic
// parameters.
template <typename T>
bool vtestFalse(const char *file, int line,
                const T &value_, const char *expression_,
                const char *comment, va_list argp) {
    if (value_) {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression_ << " should be false but was " << (value_);
        if (comment) {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Same as testFalse above, but comment will automatically be set to 0.
template <typename T>
bool testFalse(const char *file, int line,
               const T &value_, const char *expression_) {
    return testFalse(file, line, value_, expression_, 0);
}

// Represents a check point in a file.
struct CheckPoint {
    // Path to the file.
    const char *file;
    // Line in the file.
    unsigned int line;

    // Less-than comparator for check points: order by file name, then
    // by line number (needed for storage in a std::set).
    bool operator<(const CheckPoint &other) const {
        int c = strcmp(file, other.file);
        if (c < 0)
            return true;
        if (c == 0 && line < other.line)
            return true;
        return false;
    }
};

// Wrapper for a set of check points.
// TODO(holtgrew): Simply store the set?
struct CheckPointStore {
    static ::std::set<CheckPoint> &data() {
        static ::std::set<CheckPoint> result;
        return result;
    }
};

// Puts the given check point into the CheckPointStore's data.
inline bool registerCheckPoint(unsigned int line, const char *file) { const char *file_name = strrchr(file, '/'); const char *file_name_2 = strrchr(file, '\\'); if (file_name_2 > file_name) file_name = file_name_2; if (!file_name) file_name = file; else ++file_name; CheckPoint cp = {file_name, line}; #ifdef _OMP #pragma omp critical #endif // #ifdef _OMP CheckPointStore::data().insert(cp); return true; } // Test whether the given check point exists in the check point // store. inline void testCheckPoint(const char *file, unsigned int line) { StaticData::totalCheckPointCount() += 1; CheckPoint cp = {file, line}; if (CheckPointStore::data().find(cp) == CheckPointStore::data().end()) { std::cerr << file << ":" << line << " -- Check point lost." << std::endl; return; } StaticData::foundCheckPointCount() += 1; } // Verify the check points for the given file. inline void verifyCheckPoints(const char *file) { char const* file_name = strrchr(file, '/'); char const* file_name_2 = strrchr(file, '\\'); if (file_name_2 > file_name) file_name = file_name_2; if (!file_name) file_name = file; else ++file_name; int len = strlen(StaticData::pathToProjects()) + strlen("/") + strlen(file) + 1; char *absolutePath = new char[len]; absolutePath[0] = '\0'; strcat(absolutePath, StaticData::pathToProjects()); strcat(absolutePath, "/"); strcat(absolutePath, file); FILE * fl = ::std::fopen(absolutePath, "r"); delete[] absolutePath; if (!fl) { std::cerr << file << " -- verifyCheckPoints could not find this file." << std::endl; } unsigned int line_number = 1; char buf[1<<16]; while (::std::fgets(buf, sizeof(buf), fl)) { if (::std::strstr(buf, "SEQAN_CHECKPOINT")) { testCheckPoint(file_name, line_number); } ++line_number; } ::std::fclose(fl); } #if SEQAN_ENABLE_TESTING // If in testing mode then raise an AssertionFailedException. inline void fail() { StaticData::thisTestOk() = false; throw AssertionFailedException(); } #else // If not in testing mode then quit with an abort. 
inline void fail() { abort(); } #endif // #if SEQAN_ENABLE_TESTING } // namespace ClassTest // This macro expands to function header for one test. #define SEQAN_DEFINE_TEST(test_name) \ template <bool speed_up_dummy_to_prevent_compilation_of_unused_tests_> void SEQAN_TEST_ ## test_name () #if SEQAN_ENABLE_TESTING // This macro expands to startup code for a test file. #define SEQAN_BEGIN_TESTSUITE(suite_name) \ int main(int argc, char **argv) { \ (void) argc; \ ::seqan::ClassTest::beginTestSuite(#suite_name, argv[0]); // This macro expands to shutdown code for a test file. #define SEQAN_END_TESTSUITE \ return ::seqan::ClassTest::endTestSuite(); \ } // This macro expands to code to call a given test. #define SEQAN_CALL_TEST(test_name) \ do { \ ::seqan::ClassTest::beginTest(#test_name); \ try { \ SEQAN_TEST_ ## test_name<true>(); \ } catch(::seqan::ClassTest::AssertionFailedException e) { \ /* Swallow exception, go on with next test. */ \ (void) e; /* Get rid of unused variable warning. */ \ } \ ::seqan::ClassTest::endTest(); \ } while (false) // This macro returns from the current function and logs a "skipped" // event for the current test. #define SEQAN_SKIP_TEST \ do { \ ::seqan::ClassTest::skipCurrentTest(); \ return; \ } while (false) #endif // #if SEQAN_ENABLE_TESTING // variadic macros are not supported by VS 2003 and before #if !defined(_MSC_VER) || (_MSC_VER >= 1400) #if SEQAN_ENABLE_DEBUG // Force a test failure. // // Usage: SEQAN_ASSERT_FAIL("Failed at position %d", pos); #define SEQAN_ASSERT_FAIL(...) \ do { \ ::seqan::ClassTest::forceFail(__FILE__, __LINE__, \ __VA_ARGS__); \ ::seqan::ClassTest::fail(); \ } while (false) // Equality assertion without a comment. // // Usage: SEQAN_ASSERT_EQ(4, 4); #define SEQAN_ASSERT_EQ(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testEqual(__FILE__, __LINE__, \ (_arg1), #_arg1, \ (_arg2), #_arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Equality assertion with a comment. 
// // Usage: SEQAN_ASSERT_EQ(4, 4); #define SEQAN_ASSERT_EQ_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testEqual(__FILE__, __LINE__, \ (_arg1), #_arg1, \ (_arg2), #_arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // In-delta-environment assertion without a comment. // // Usage: SEQAN_ASSERT_IN_DELTA(4.1, 4, 0.1); #define SEQAN_ASSERT_IN_DELTA(_arg1, _arg2, _arg3) \ do { \ if (!::seqan::ClassTest::testInDelta(__FILE__, __LINE__, \ (_arg1), #_arg1, \ (_arg2), #_arg2, \ (_arg3), #_arg3)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // In-delta-environment assertion witha comment. // // Usage: SEQAN_ASSERT_IN_DELTA_MSG(4.1, 4, 0.1, "3.9 <= 4.1 <= 4.1"); #define SEQAN_ASSERT_IN_DELTA_MSG(_arg1, _arg2, _arg3, ...) \ do { \ if (!::seqan::ClassTest::testInDelta(__FILE__, __LINE__, \ (_arg1), #_arg1, \ (_arg2), #_arg2, \ (_arg3), #_arg3, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Inequality assertion without a comment. // // Usage: SEQAN_ASSERT_NEQ(4, 5); #define SEQAN_ASSERT_NEQ(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testNotEqual(__FILE__, __LINE__, \ (_arg1), #_arg1, \ (_arg2), #_arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Inequality assertion with a comment. // // Usage: SEQAN_ASSERT_NEQ(4, 5); #define SEQAN_ASSERT_NEQ_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testNotEqual(__FILE__, __LINE__, \ (_arg1), #_arg1, \ (_arg2), #_arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Less-than-or-equal assertion without a comment. #define SEQAN_ASSERT_LEQ(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testLeq(__FILE__, __LINE__, \ (_arg1), #_arg1, \ (_arg2), #_arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Less-than-or-equal assertion with a comment. #define SEQAN_ASSERT_LEQ_MSG(_arg1, _arg2, ...) 
\ do { \ if (!::seqan::ClassTest::testLeq(__FILE__, __LINE__, \ (_arg1), #_arg1, \ (_arg2), #_arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Less-than assertion without a comment. #define SEQAN_ASSERT_LT(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testLt(__FILE__, __LINE__, \ (_arg1), #_arg1, \ (_arg2), #_arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Less-than assertion with a comment. #define SEQAN_ASSERT_LT_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testLt(__FILE__, __LINE__, \ (_arg1), #_arg1, \ (_arg2), #_arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Greater-than-or-equal assertion without a comment. #define SEQAN_ASSERT_GEQ(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testGeq(__FILE__, __LINE__, \ (_arg1), #_arg1, \ (_arg2), #_arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Greater-than-or-equal assertion with a comment. #define SEQAN_ASSERT_GEQ_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testGeq(__FILE__, __LINE__, \ (_arg1), #_arg1, \ (_arg2), #_arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Greater-than assertion without a comment. #define SEQAN_ASSERT_GT(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testGt(__FILE__, __LINE__, \ (_arg1), #_arg1, \ (_arg2), #_arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Greater-than assertion with a comment. #define SEQAN_ASSERT_GT_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testGt(__FILE__, __LINE__, \ (_arg1), #_arg1, \ (_arg2), #_arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // TODO(holtgrew): Rename to SEQAN_ASSERT_TRUE once that name is free.; // Trueness assertion with a comment. 
// // Usage: SEQAN_ASSERT_TRUE(false); #define SEQAN_ASSERT_TRUE(_arg1) \ do { \ if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \ (_arg1), #_arg1)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // TODO(holtgrew): Rename to SEQAN_ASSERT_TRUE once that name is free.; // Trueness assertion with a comment. #define SEQAN_ASSERT_TRUE_MSG(_arg1, ...) \ do { \ if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \ (_arg1), #_arg1, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Falseness assertion without a comment. // // Usage: SEQAN_ASSERT_NOT(false); #define SEQAN_ASSERT_NOT(_arg1) \ do { \ if (!::seqan::ClassTest::testFalse(__FILE__, __LINE__, \ (_arg1), #_arg1)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Falseness assertion with a comment. #define SEQAN_ASSERT_NOT_MSG(_arg1, ...) \ do { \ if (!::seqan::ClassTest::testFalse(__FILE__, __LINE__, \ (_arg1), #_arg1, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) #else // #if SEQAN_ENABLE_DEBUG #define SEQAN_ASSERT_EQ(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_EQ_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_NEQ(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_NEQ_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_LEQ(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_LEQ_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_LT(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_LT_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_GEQ(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_GEQ_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_GT(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_GT_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_TRUE(_arg1) do {} while (false) #define SEQAN_ASSERT_TRUE_MSG(_arg1, ...) do {} while (false) #define SEQAN_ASSERT_NOT(_arg1) do {} while (false) #define SEQAN_ASSERT_NOT_MSG(_arg1, ...) 
do {} while (false) #define SEQAN_ASSERT_FAIL(...) do {} while (false) #endif // #if SEQAN_ENABLE_DEBUG #else // no variadic macros #if SEQAN_ENABLE_DEBUG inline void SEQAN_ASSERT_FAIL(const char *comment, ...) { va_list args; va_start(args, comment); ::seqan::ClassTest::vforceFail("", 0, comment, args); ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2, typename T3> void SEQAN_ASSERT_IN_DELTA(T1 const &_arg1, T2 const &_arg2, T3 const &_arg3) { if (!::seqan::ClassTest::testInDelta("", 0, _arg1, "", _arg2, "", _arg3, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2, typename T3> void SEQAN_ASSERT_IN_DELTA_MSG(T1 const &_arg1, T2 const &_arg2, T3 const &_arg3, const char *comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestInDelta("", 0, _arg1, "", _arg2, "", _arg3, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_EQ(T1 const &_arg1, T2 const &_arg2) { if (!::seqan::ClassTest::testEqual("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_EQ_MSG(T1 const &_arg1, T2 const &_arg2, const char *comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestEqual("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_NEQ(T1 const &_arg1, T2 const &_arg2) { if (!::seqan::ClassTest::testNotEqual("", _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_NEQ_MSG(T1 const &_arg1, T2 const &_arg2, const char *comment, ...) 
{ va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestNotEqual("", _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_LEQ(T1 const &_arg1, T2 const &_arg2) { if (!::seqan::ClassTest::testLeq("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_LEQ_MSG(T1 const &_arg1, T2 const &_arg2, const char *comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestLeq("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_LT(T1 const &_arg1, T2 const &_arg2) { if (!::seqan::ClassTest::testLt("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_LT_MSG(T1 const &_arg1, T2 const &_arg2, const char *comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestLt("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_GEQ(T1 const &_arg1, T2 const &_arg2) { if (!::seqan::ClassTest::testGeq("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_GEQ_MSG(T1 const &_arg1, T2 const &_arg2, const char *comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestGeq("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_GT(T1 const &_arg1, T2 const &_arg2) { if (!::seqan::ClassTest::testGt("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_GT_MSG(T1 const &_arg1, T2 const &_arg2, const char *comment, ...) 
{ va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestGt("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1> void SEQAN_ASSERT_TRUE(T1 const &_arg1) { if (!::seqan::ClassTest::testTrue("", 0, _arg1, "")) ::seqan::ClassTest::fail(); } template <typename T1> void SEQAN_ASSERT_TRUE_MSG(T1 const &_arg1, const char *comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestTrue("", 0, _arg1, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1> void SEQAN_ASSERT_NOT(T1 const &_arg1) { if (!::seqan::ClassTest::testFalse("", 0, _arg1, "")) ::seqan::ClassTest::fail(); } template <typename T1> void SEQAN_ASSERT_NOT_MSG(T1 const &_arg1, const char *comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestFalse("", 0, _arg1, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } #else // #if SEQAN_ENABLE_DEBUG inline void SEQAN_ASSERT_FAIL(const char *comment, ...) {} template <typename T1, typename T2, typename T3> void SEQAN_ASSERT_IN_DELTA(T1 const &_arg1, T2 const &_arg2, T3 const &_arg3) {} template <typename T1, typename T2, typename T3> void SEQAN_ASSERT_IN_DELTA_MSG(T1 const &_arg1, T2 const &_arg2, T3 const &_arg3, const char *comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_EQ(T1 const &_arg1, T2 const &_arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_EQ_MSG(T1 const &_arg1, T2 const &_arg2, const char *comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_NEQ(T1 const &_arg1, T2 const &_arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_NEQ_MSG(T1 const &_arg1, T2 const &_arg2, const char *comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_LEQ(T1 const &_arg1, T2 const &_arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_LEQ_MSG(T1 const &_arg1, T2 const &_arg2, const char *comment, ...) 
{} template <typename T1, typename T2> void SEQAN_ASSERT_LT(T1 const &_arg1, T2 const &_arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_LT_MSG(T1 const &_arg1, T2 const &_arg2, const char *comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_GEQ(T1 const &_arg1, T2 const &_arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_GEQ_MSG(T1 const &_arg1, T2 const &_arg2, const char *comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_GT(T1 const &_arg1, T2 const &_arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_GT_MSG(T1 const &_arg1, T2 const &_arg2, const char *comment, ...) {} template <typename T1> void SEQAN_ASSERT_TRUE(T1 const &_arg1) {} template <typename T1> void SEQAN_ASSERT_TRUE_MSG(T1 const &_arg1, const char *comment, ...) {} template <typename T1> void SEQAN_ASSERT_NOT(T1 const &_arg1) {} template <typename T1> void SEQAN_ASSERT_NOT_MSG(T1 const &_arg1, const char *comment, ...) {} #endif // #if SEQAN_ENABLE_DEBUG #endif // no variadic macros // Returns a string (of type char*) with the path to the called binary. // // Use this to locate files relative to the test binary. #define SEQAN_PROGRAM_PATH \ ::seqan::ClassTest::StaticData::basePath() // Returns a const char * string with the path to the projects directory. #define SEQAN_PATH_TO_PROJECTS() \ ::seqan::ClassTest::StaticData::pathToProjects() // Returns the POSIX int file handle to an open file. // TODO(holtgrewe): Uncomment if openTempFile has been implemented. // #define SEQAN_OPEN_TEMP_FILE() (::seqan::ClassTest::openTempFile()) // Returns a temporary filename. #define SEQAN_TEMP_FILENAME() (::seqan::ClassTest::tempFileName()) #if SEQAN_ENABLE_CHECKPOINTS // Create a check point at the point where the macro is placed. // TODO(holtgrew): Should be called SEQAN_CHECK_POINT to be consistent. 
#define SEQAN_CHECKPOINT \ ::seqan::ClassTest::registerCheckPoint(__LINE__, __FILE__); // Call the check point verification code for the given file. #define SEQAN_VERIFY_CHECKPOINTS(filename) \ ::seqan::ClassTest::verifyCheckPoints(filename) #else // #if SEQAN_ENABLE_CHECKPOINTS #define SEQAN_CHECKPOINT // If checkpoints are to be verified if testing is disabled then print // a warning. #define SEQAN_VERIFY_CHECKPOINTS(filename) \ do { \ fprintf(stderr, ("WARNING: Check point verification is " \ "disabled. Trying to verify %s from %s:%d.\n"), \ filename, __FILE__, __LINE__); \ } while(false) #endif // #if SEQAN_ENABLE_CHECKPOINTS #if !SEQAN_ENABLE_TESTING #define SEQAN_BEGIN_TESTSUITE(suite_name) \ int main(int argc, char **argv) { \ (void) argc; \ (void) argv; \ fprintf(stderr, "Warning: SEQAN_ENABLE_TESTING is wrong and you used the macro SEQAN_BEGIN_TESTSUITE!\n"); #define SEQAN_END_TESTSUITE } #define SEQAN_CALL_TEST(test_name) do { SEQAN_TEST_ ## test_name(); } while (false) #define SEQAN_SKIP_TEST do {} while (false) #endif // #if !SEQAN_ENABLE_TESTING } // namespace seqan #endif // SEQAN_BASIC_BASIC_TESTING_H_
GB_unop__identity_int16_int8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_int16_int8)
// op(A') function:  GB (_unop_tran__identity_int16_int8)

// C type:   int16_t
// A type:   int8_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int16_t z = (int16_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int16_t z = (int16_t) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (0 here: the int8 -> int16 cast means the memcpy fast path cannot be used)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Computes Cx [p] = (int16_t) Ax [p] for all p (dense case), or only for the
// entries whose bitmap bit Ab [p] is set (bitmap case).
GrB_Info GB (_unop_apply__identity_int16_int8)
(
    int16_t *Cx,                // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // dense case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int8_t aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int8_t aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is supplied by the shared template
// GB_unop_transpose.c, parameterized by the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_int16_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__round_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__round_fc64_fc64
// op(A') function:  GB_unop_tran__round_fc64_fc64

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = GB_cround (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_cround (x) ;

// casting (no typecast: A and C are both double complex)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = GB_cround (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ROUND || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Computes Cx [p] = GB_cround (Ax [p]) for all p.
GrB_Info GB_unop_apply__round_fc64_fc64
(
    GxB_FC64_t *Cx,         // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC64_t aij = Ax [p] ;
        GxB_FC64_t z = aij ;
        Cx [p] = GB_cround (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is supplied by the shared template
// GB_unop_transpose.c (phase 2), parameterized by the GB_* macros above.
GrB_Info GB_unop_tran__round_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
OpenMP.c
// Must be compiled with -fopenmp flag to enable OpenMP
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

// Fill a rows x columns matrix (row-major, flat array) with pseudo-random
// values; if maxValue > 0 the values are reduced modulo maxValue.
void initializeMatrix(long long *matrix, const int rows, const int columns,
                      const long long maxValue) {
    for (int row = 0; row < rows; row++) {
        for (int column = 0; column < columns; column++) {
            matrix[row * columns + column] = rand();
            if (maxValue > 0) {
                matrix[row * columns + column] %= maxValue;
            }
        }
    }
}

// resultMatrix = matrixA + matrixB (element-wise).
// Returns EXIT_SUCCESS, or EXIT_FAILURE if the dimensions do not match.
int addMatrices(long long *resultMatrix,
                const long long *matrixA, const int aRows, const int aColumns,
                const long long *matrixB, const int bRows, const int bColumns) {
    if (aRows != bRows || aColumns != bColumns) {
        return EXIT_FAILURE;
    }
    const int n = aRows;
    const int m = aColumns;
    // FIX: a "parallel for" nested inside another parallel region is either
    // serialized or oversubscribes threads; collapse the two loops into one
    // parallel loop instead.
    #pragma omp parallel for collapse(2)
    for (int row = 0; row < n; row++) {
        for (int column = 0; column < m; column++) {
            resultMatrix[row * m + column] =
                matrixA[row * m + column] + matrixB[row * m + column];
        }
    }
    return EXIT_SUCCESS;
}

// resultMatrix = matrixA (n x m) * matrixB (m x p).
// Returns EXIT_SUCCESS, or EXIT_FAILURE if the inner dimensions differ.
int multiplyMatrices(long long *resultMatrix,
                     const long long *matrixA, const int aRows, const int aColumns,
                     const long long *matrixB, const int bRows, const int bColumns) {
    if (aColumns != bRows) {
        return EXIT_FAILURE;
    }
    const int n = aRows;
    const int m = aColumns;
    const int p = bColumns;
    // FIX: same nested-parallelism issue as addMatrices; the innermost
    // k-loop stays serial because each (row, column) cell is independent.
    #pragma omp parallel for collapse(2)
    for (int row = 0; row < n; row++) {
        for (int column = 0; column < p; column++) {
            resultMatrix[row * p + column] = 0;
            for (int k = 0; k < m; k++) {
                resultMatrix[row * p + column] +=
                    matrixA[row * m + k] * matrixB[k * p + column];
            }
        }
    }
    return EXIT_SUCCESS;
}

// Print a rows x columns matrix, tab-separated, one row per line.
void printMatrix(long long *matrix, int rows, int columns) {
    for (int row = 0; row < rows; row++) {
        for (int column = 0; column < columns; column++) {
            printf("%lli\t", matrix[row * columns + column]);
        }
        puts("");
    }
}

#define N 4
#define M 5
#define P 6

int main(int argc, char **argv) {
    srand((unsigned int) time(NULL));

    // Vector multipication
    // A is an n x m matrix
    long long matrixA[N][M];
    // B is an m x p matrix
    long long matrixB[M][P];
    // AB is an n x p matrix
    long long matrixAB[N][P];

    // Vector addition
    // X is an n x m matrix
    long long matrixX[N][M];
    // Y is an n x m matrix
    long long matrixY[N][M];
    // Z is an n x m matrix
    long long matrixZ[N][M];

    // Vector multipication
    initializeMatrix((long long *) matrixA, N, M, 100);
    initializeMatrix((long long *) matrixB, M, P, 100);

    // Vector addition
    initializeMatrix((long long *) matrixX, N, M, 100);
    initializeMatrix((long long *) matrixY, N, M, 100);

    // NOTE: clock() measures CPU time summed over all OpenMP threads, not
    // wall-clock time, so the printed figure can exceed the elapsed time.
    clock_t endTime;
    clock_t startTime = clock();

    // Vector multipication
    multiplyMatrices((long long *) matrixAB,
                     (const long long *) matrixA, N, M,
                     (const long long *) matrixB, M, P);

    // Vector addition
    // FIX: the second operand was matrixX again, so "Matrix X+Y" actually
    // printed X+X; pass matrixY as the labels and printout intend.
    addMatrices((long long *) matrixZ,
                (const long long *) matrixX, N, M,
                (const long long *) matrixY, N, M);

    endTime = clock();

    // Vector multipication
    puts("Matrix A:");
    printMatrix((long long *) matrixA, N, M);
    puts("");
    puts("Matrix B:");
    printMatrix((long long *) matrixB, M, P);
    puts("");
    puts("Matrix AB:");
    printMatrix((long long *) matrixAB, N, P);
    puts("");

    // Vector addition
    puts("Matrix X:");
    printMatrix((long long *) matrixX, N, M);
    puts("");
    puts("Matrix Y:");
    printMatrix((long long *) matrixY, N, M);
    puts("");
    puts("Matrix X+Y:");
    printMatrix((long long *) matrixZ, N, M);
    puts("");

    printf("Elapsed CPU time: %f seconds",
           (endTime - startTime) * 1.0 / CLOCKS_PER_SEC);
    return EXIT_SUCCESS;
}
logit_loss_delta.h
/**
 * Copyright (c) 2015 by Contributors
 */
#ifndef ZDIFACTO_LOSS_LOGIT_LOSS_DELTA_H_
#define ZDIFACTO_LOSS_LOGIT_LOSS_DELTA_H_
#include <cmath>
#include <vector>
#include "zdifacto/loss.h"
#include "zdifacto/sarray.h"
#include "common/range.h"
#include "common/spmv.h"
#include "dmlc/omp.h"
#include "dmlc/logging.h"
namespace zdifacto {
/**
 * \brief parameters for \ref LogitLossDelta
 */
struct LogitLossDeltaParam : public dmlc::Parameter<LogitLossDeltaParam> {
  /**
   * \brief whether and how to compute the hessian
   *   0 : no
   *   1 : diagonal hessian
   *   2 : the upper bound of the diagonal hessian (not yet implemented, see
   *       CalcGrad below)
   */
  int compute_hession;
  DMLC_DECLARE_PARAMETER(LogitLossDeltaParam) {
    DMLC_DECLARE_FIELD(compute_hession).set_range(0, 2).set_default(1);
  }
};
/**
 * \brief the logistic loss, specialized for block coordinate descent
 *
 * :math:`\ell(x,y,w) = log(1 + exp(- y <w, x>))`
 *
 * \ref LogitLossDelta is fed with X' (the transpose of X, in row-major
 * format) and delta w each time, and is able to compute the second order
 * gradients.
 *
 * Note: One can use \ref LogitLoss for the ordinary logistic loss, namely
 * given X and w each time.
 */
class LogitLossDelta : public Loss {
 public:
  /** \brief constructor */
  LogitLossDelta() { }
  /** \brief deconstructor */
  virtual ~LogitLossDelta() { }

  KWArgs Init(const KWArgs& kwargs) override {
    return param_.InitAllowUnknown(kwargs);
  }

  /**
   * \brief accumulate predictions:  pred += X * delta_w
   *
   * @param data X', the transpose of X
   * @param param input parameters
   *  - param[0], real_t vector, the delta weight, namely new_w - old_w
   *  - param[1], optional int vector, the weight positions
   * @param pred predict output, should be pre-allocated
   */
  void Predict(const dmlc::RowBlock<unsigned>& data,
               const std::vector<SArray<char>>& param,
               SArray<real_t>* pred) override {
    int psize = param.size();
    CHECK_GE(psize, 1); CHECK_LE(psize, 2);
    SArray<real_t> delta_w(param[0]);
    // positions are optional; an empty SArray means "use all weights"
    SArray<int> w_pos = psize == 2 ? SArray<int>(param[1]) : SArray<int>();
    SpMV::TransTimes(data, delta_w, pred, nthreads_, w_pos, {});
  }

  /**
   * \brief compute the gradients
   *
   *   tau = 1 / (1 + exp(y .* pred))
   * first order grad
   *   f'(w) = - X' * (tau .* y)
   * diagonal second order grad :
   *   f''(w) = (X.*X)' * (tau .* (1-tau))
   *
   * @param data X', the transpose of X
   * @param param input parameters
   *  - param[0], real_t vector, the predict output
   *  - param[1], optional int vector, the gradient positions
   *  - param[2], optional real_t vector, the delta needed if
   *    compute_hession == 2
   * @param grad gradient output, should be preallocated
   */
  void CalcGrad(const dmlc::RowBlock<unsigned>& data,
                const std::vector<SArray<char>>& param,
                SArray<real_t>* grad) override {
    int psize = param.size();
    CHECK_GE(psize, 1); CHECK_LE(psize, 3);
    if (grad->empty()) return;

    // p starts as a private copy of the predictions, then is overwritten
    // in place with  -y * tau  (tau = 1 / (1 + exp(y * pred)))
    SArray<real_t> p;
    p.CopyFrom(SArray<real_t>(param[0]));
    CHECK_NOTNULL(data.label);
#pragma omp parallel for num_threads(nthreads_)
    for (size_t i = 0; i < p.size(); ++i) {
      real_t y = data.label[i] > 0 ? 1 : -1;
      p[i] = - y / (1 + std::exp(y * p[i]));
    }

    // first-order gradient:  grad = X' * (-y * tau)  at positions grad_pos
    SArray<int> grad_pos = psize > 1 ? SArray<int>(param[1]) : SArray<int>();
    if (param_.compute_hession != 0) CHECK(!grad_pos.empty());
    SpMV::Times(data, p, grad, nthreads_, {}, grad_pos);
    if (param_.compute_hession == 0) return;

    // hessian positions: each valid gradient slot shifted by one.
    // NOTE(review): this presumes gradient and hessian entries are stored
    // interleaved in `grad` -- confirm against the SpMV caller.
    SArray<int> h_pos; h_pos.CopyFrom(grad_pos);
    for (size_t i = 0; i < h_pos.size(); ++i) {
      if (h_pos[i] >= 0) ++h_pos[i];
    }

    // compute X .* X (element-wise square of the feature values; if
    // data.value is NULL the features are binary and X.*X == X)
    dmlc::RowBlock<unsigned> XX = data;
    SArray<dmlc::real_t> xx_value;
    if (data.value) {
      xx_value.resize(data.offset[data.size]);
      for (size_t i = data.offset[0]; i < data.offset[data.size]; ++i) {
        xx_value[i] = data.value[i] * data.value[i];
      }
      XX.value = xx_value.data();
    }

    // p = tau * (1 - tau), reusing p = -y * tau from above:
    //   -p * (y + p) = (y * tau) * (y - y * tau) = y^2 * tau * (1 - tau)
    //   and y^2 == 1
#pragma omp parallel for num_threads(nthreads_)
    for (size_t i = 0; i < p.size(); ++i) {
      real_t y = data.label[i] > 0 ? 1 : -1;
      p[i] = - p[i] * (y + p[i]);
    }

    if (param_.compute_hession == 1) {
      // diagonal hessian:  (X.*X)' * (tau .* (1-tau))  at positions h_pos
      SpMV::Times(XX, p, grad, nthreads_, {}, h_pos);
    } else if (param_.compute_hession == 2) {
      // upper-bound-of-hessian mode is not implemented yet
      LOG(FATAL) << "...";
      CHECK_EQ(psize, 3);
      SArray<real_t> delta(param[2]);
      // TODO(mli)
    } else {
      LOG(FATAL) << "...";
    }
  }

 private:
  LogitLossDeltaParam param_;
};
}  // namespace zdifacto
#endif  // ZDIFACTO_LOSS_LOGIT_LOSS_DELTA_H_
target-1.c
/* { dg-do compile } */

/* Compile-only GCC test: verifies the compiler diagnoses branches into and
   out of an OpenMP "target" structured block, while accepting a branch that
   stays entirely inside one region.  The dg-error / dg-warning comments are
   the expected-diagnostic patterns checked by the DejaGnu harness and must
   be kept exactly as written.  */

void
foo (int x)
{
  /* Branch out of a target region: rejected.  */
 bad1:
  #pragma omp target
    goto bad1;			// { dg-error "invalid branch to/from OpenMP structured block" }

  /* Branch into a target region: rejected.  */
  goto bad2;			// { dg-error "invalid entry to OpenMP structured block" }
  #pragma omp target
    {
      bad2: ;
    }

  /* Branch whose source and destination are both inside the same
     target region: accepted.  */
  #pragma omp target
    {
      int i;
      goto ok1;
      for (i = 0; i < 10; ++i)
	{ ok1: break; }
    }

  /* A case label inside a target region is also an entry into the
     structured block: rejected.  */
  switch (x)			// { dg-error "invalid entry to OpenMP structured block" }
    {
    #pragma omp target		// { dg-warning "statement will never be executed" }
      {
      case 0:;
      }
    }
}
GB_unaryop__identity_int16_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int16_uint64
// op(A') function:  GB_tran__identity_int16_uint64

// C type:   int16_t
// A type:   uint64_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Computes Cx [p] = (int16_t) Ax [p] for all p.
GrB_Info GB_unop__identity_int16_uint64
(
    int16_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is supplied by the shared template
// GB_unaryop_transpose.c (phase 2), parameterized by the GB_* macros above.
GrB_Info GB_tran__identity_int16_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
linemax-cc.h
/** * @author Aaron Tikuisis, modified by Matthew Arnold * @file linemax-cc.h Implementation of line max * * $Id$ * * K-Best Rescoring Module * * Technologies langagieres interactives / Interactive Language Technologies * Inst. de technologie de l'information / Institute for Information Technology * Conseil national de recherches Canada / National Research Council Canada * Copyright 2005, Sa Majeste la Reine du Chef du Canada / * Copyright 2005, Her Majesty in Right of Canada */ #include "rescoring_general.h" #include "bleu.h" #include <algorithm> #include <cassert> #include "portage_defs.h" namespace Portage { /*Comment on fix: The bug fixed was in findSentenceIntervals. In some occasions it was returning values out of range for gammas (inf, nan) so a fix was added in to prevent that and the second problem was that on some occasions it picked values that were already chosen (ie. ie said that the "line" for sentence 2 crossed the target line more than once, which is impossible, so a fix was added in to prevent a k value from being selected more than once, and not just prevent the selection of the previous k-value. */ /** * Check if -inf < x < inf => x is finite * @param x operand * @return Returns true if x is finite */ inline bool finite(double x) { return (x != INFINITY && x != -INFINITY); } template <class ScoreStats> void LineMax<ScoreStats>::findSentenceIntervals(Uint &numchanges, double *& gamma, ScoreStats *& dBLEU, ScoreStats & curScore, linemaxpt *& myHeappoint, int s, const uVector& p, const uVector& dir, const uMatrix& H, const vector<ScoreStats>& scoreStats) // If a heap-point is not added, myHeappoint will be NULL. // All arrays indexed by s are replaced here by their s-th entry, ie.. 
// numChanges = numChanges[s], // gamma = gamma[s], // dBLEU = dBLEU[s], // H = H[s], // scoreStats = scoreStats[s] { using namespace boost::numeric; /* For the given source sentence (f_s): The estimation function, \hat{e}(f_s, \lambda) is defined as follows: \hat{e}(f_s, \lambda) = argmax_{0 <= k < K} ( \sum_{m=1}^M \lambda_m h_m(e_k, f_s) ) = max_index (H * \lambda) (H is the K x M matrix whose (k,m)-th entry is h_m(e_k, f_s).) We consider the values of \hat{e} as we vary \lambda along the line: p + \gamma * dir. As a function of \gamma (abusing notation), we have \hat{e}(\gamma) = max_index (H * p + \gamma * H * dir). Denote A = \gamma * H * dir, B = H * p. Let L(\gamma) = \gamma * A + B, and let l_k(\gamma) be the k-th entry in L(\gamma). Each l_k(\gamma) is clearly a line, called the k-th line. Clearly, the value of \hat{e}(\gamma) can only change at points where two different lines, l_i(\gamma) and l_j(\gamma) intersect. We do the following to completely determine the function \hat{e}(\gamma): i) Determine some point \gamma_0 such that no lines intersect for any \gamma <= \gamma_0 ii) Determine oldk = max_index L(\gamma_0). Let oldgamma = \gamma_0. iii) Determine newgamma = min (\gamma coordinate of intersection between l_k and l_oldk), newk = argmin (\gamma coordinate of intersection between l_k and l_oldk), where min, argmin range over all k which are different from newk and for which the intersection happens after oldgamma. If there is no such newgamma, go to step (v). iv) It has been determined that \hat{e} takes the value oldk on (oldgamma, newgamma), (or (-\infty, newgamma) if oldgamma = \gamma_0). Set oldk = newk, oldgamma = newgamma, and go to step (iii) v) It has been determined that \hat{e} takes the value oldk on (oldgamma, \infty), (or (-\infty, \infty) if oldgamma = \gamma_0). \hat{e} has been determined piecewise, so done. We store each newgamma (in ascending order) in gamma (an array), and the number of newgamma's in numchanges. 
Since there are K different lines, we know a priori that there are at most (K-1) newgamma's to store. In practice here, we can forget the specific values of \hat{e} but remember their contribution to the BLEU score. Thus, we tally up the total statistics for the BLEU score at \gamma_0 in curScore, and for the change in \hat{e} at gamma[i], we store the change in BLEU statistics in dBLEU[i]. */ assert(H.size1() == scoreStats.size()); const Uint K(H.size1()); const Uint M(H.size2()); const uVector A(ublas::prec_prod(H, dir)); const uVector B(ublas::prec_prod(H, p)); uVector C(K); uVector pt(M); double sortA[K]; numchanges = 0; bool found[K]; //array to track sentences we've seen fill(found, found+K, false); // Find all the cusps along the curve max_{k} (A[k]*x + B[k]) // First, find an x-coordinate that occurs before any cusp for (Uint k(0); k<K; ++k) { sortA[k] = A(k); if (isnan(sortA[k])) sortA[k] = -INFINITY; } // for sort(sortA, sortA + K); double minDA = INFINITY; for (Uint k(0); k<K-1; ++k) { if (finite(sortA[k]) && finite(sortA[k+1]) && sortA[k+1] != sortA[k]) { minDA = min(minDA, sortA[k+1] - sortA[k]); } // if } // for double oldgamma(0.0f); if (minDA == INFINITY) { oldgamma = INFINITY; } else { double minB = INFINITY; double maxB = -INFINITY; for (Uint k(0); k<K; ++k) { const double x(B(k)); if (finite(x)) { minB = min(minB, x); maxB = max(maxB, x); } // if } // for oldgamma = (minB - maxB) / minDA; // oldgamma = min_{i1,j1,i2,j2} (B[i1] - B[j1]) / (A[j2] - A[i2]) // <= min_{i,j} (B[i] - B[j]) / (A[j] - A[i]) // <= min_{i,j} (B[i] - B[j]) / (A[j] - A[i]) // oldgamma - 1 would be our \gamma_0 (for step (i)), if we don't // have any infinite values in A. } // If any entry in A is +/-INFINITY, then the "line" for that candidate // sentence will take values from {+INFINITY, -INFINITY, NaN}. When // the "line" changes from one of these values to another, consider // that to be an intersection point of that "line" with every other // line. 
// The value can only change at gamma where the vector p + gamma * dir // contains an entry of 0. // Here, we find the minimum gamma such that p + gamma * dir has a zero // entry, and the final \gamma_0 will be less than the minimum of this // and the previous oldgamma. // pt_m = -p_m / dir_m, for all m // ie. pt_m is the gamma s.t. p + gamma * dir = 0 pt = p; for (Uint m(0); m<M; ++m) pt(m) /= dir(m); pt *= -1.0f; // In case any dir_m = 0, for some m, replace all non-finite values in // pt with 0 for (Uint m(0); m<M; ++m) { if (!finite(pt(m))) { pt(m) = 0; } } // Subtract one here so that it's strictly less than any intersection point. oldgamma = min(oldgamma, *std::min_element(pt.begin(), pt.end())) - 1; // Determine argmax_{k} a[k]*oldgamma + b[k]: // C = B + oldgamma * A = H * (p + oldgamma * dir) // Calculate pt = p + oldgamma * dir, then calculate C // This should avoid inconsistency with infinity, which pops up with // the other way of computing C. pt = oldgamma * dir + p; C = ublas::prec_prod(H, pt); // Initially, oldk = index of maximum element in C Uint oldk(my_vector_max_index(C)); #pragma omp critical (findSentenceIntervals_curBLEU) { curScore += scoreStats[oldk]; } // ends omp critical section numchanges = 0; while (true) { // Find the line whose intersection with the oldk-th line occurs next double newgamma(oldgamma); int newk(-1); if (A(oldk) == INFINITY) { // Not going to do any better as gamma gets bigger break; } else if (A(oldk) == -INFINITY || isnan(A(oldk))) { newgamma = INFINITY; // Find if/when this "line" changes from +INFINITY to -INFINITY or NaN for (Uint m(0); m<M; ++m) { if ((H(oldk, m) == INFINITY && dir(m) < 0) || (H(oldk, m) == -INFINITY && dir(m) > 0)) { newgamma = min(newgamma, -p(m) / dir(m)); // Find gamma s.t. 
p_m + dir_m * gamma = 0 // The first point where this occurs (under the // conditions in that if statement above) should be // where the "line" becomes -INFINITY } } if (newgamma <= oldgamma || newgamma == INFINITY) { break; } else { // Find maximum at curgamma. pt = newgamma * dir + p; C = ublas::prec_prod(H, pt); newk = my_vector_max_index(C); while (newk != -1 && found[newk]) { //while we've seen this newk already - catch to make //sure we select a good value C(newk) = -INFINITY; newk = my_vector_max_index(C); if (C(newk) == -INFINITY && found[newk]) newk = -1; } //newk = -1 || found[newk] == false } // if } else { for (Uint k(0); k<K; ++k) { //check to see if we haven't seen yet, not just if he //wasn't the last one picked if (!found[k]) { double curgamma(0.0f); if (A(k) == INFINITY || isnan(A(k))) { curgamma = -INFINITY; // Find where this "line" changes from -INFINITY or // NaN to +INFINITY for (Uint m(0); m<M; ++m) { if ((H(k, m) == INFINITY && dir(m) > 0) || (H(k, m) == -INFINITY && dir(m) < 0)) { curgamma = max(curgamma, -p(m) / dir(m)); // Find gamma s.t. 
p_m + dir_m * gamma = 0 // The last point where this occurs (under // the conditions in that if statement // above) should be where the "line" // becomes +INFINITY } // if } // for } else { curgamma = (B(k) - B(oldk)) / (A(oldk) - A(k)); // curgamma = (B[k] - B[oldk]) / (A[k] - A[oldk]) // This is the x component in the intersection of // the lines: // y = A[k] * x + B[k] , y = A[oldk] * x + B[oldk] } // if if (curgamma > oldgamma && (newk == -1 || curgamma < newgamma)) { newgamma = curgamma; newk = k; } // if } // if } // for } // if //gamma unacceptable value (-inf, inf, nan) or no new intersection found if (newk == -1 || !finite(newgamma)) { // no new intersections break; } // if // Remember stuff for this intersection assert(numchanges < K); gamma[numchanges] = newgamma; dBLEU[numchanges] = scoreStats[newk] - scoreStats[oldk]; numchanges++; oldk = newk; oldgamma = newgamma; found[newk] = true; } // while if (numchanges > 0) { myHeappoint = new linemaxpt(); myHeappoint->gamma = gamma[0]; myHeappoint->s = s; myHeappoint->i = 0; } else { myHeappoint = NULL; } // if } // ends LineMax<ScoreStats>::findSentenceIntervals //////////////////////////////////////// // LINEMAX template <class ScoreStats> void LineMax<ScoreStats>::operator()(uVector& p, uVector& dir, bool record_history) { // If the best range found is (-\infty, t) or (t, \infty), we use // t - SMALL or t + SMALL respectively as the final gamma. const double SMALL(1.0f); Uint numchanges[S]; this->record_history = record_history; if (record_history) history.clear(); ScoreStats curScoreStats; // Accumulate the current BLEU statistics. // Store the linemaxpt values for the least gamma in each partition; // will subsequently become a heap. 
linemaxpt* heappoints[S]; int s; #pragma omp parallel for private(s) for (s=0; s<int(S); ++s) { findSentenceIntervals(numchanges[s], gammaWorkSpace[s], scoreWorkSpace[s], curScoreStats, // Needs a one time lock heappoints[s], // clean up null pointers s, // const p, // const dir, // const vH[s], // const allScoreStats[s]); // const } // for // Remove the empty heap points and recalculate the heap size linemaxpt** last_heappoint = remove_if(heappoints, heappoints+S, linemaxpt::isNull); int heapsize(last_heappoint - heappoints); // Number of members in heappoints /* Using the previous computations, we now determine the intervals on which the BLEU score is constant. Essentially, we order the gamma[s][i]'s from least to greatest: gamma[s_1][i_1] <= gamma[s_2][i_2] <= .. <= gamma[s_N][i_N] and compute the BLEU score on the intervals (-\infty, gamma[s_1][i_1]), (gamma[s_N][i_N], \infty), and (gamma[s_n][i_n], gamma[s_{n+1}][i_{n+1}]) for all n. The BLEU stats for (-\infty, gamma[s_1][i_1]) are already stored in curScore. For each (s,i), we have recorded (in scoreWorkSpace) the change in the BLEU stats from the interval just before gamma[s][i] to the interval just after gamma[s][i]. By iterating through the (s, i)'s in order by gamma[s][i] (ie. iterating through the (s_n, i_n)'s in order by n), the BLEU stats for each interval are computed by updating the stats for the previous interval. In practice here, we already have gamma[s][0] <= gamma[s][1] <= .. <= gamma[s][numchanges[s] - 1] for each s, and this can be used to order the gamma[s][i]'s more efficiently (similar to mergesort). We use a heap containing triples (gamma, s, i), with the ordering that puts the triple with the least value for gamma at the root of the heap. The following outlines how we iterate through the (s, i) in order: i) Initially, produce a heap containing (gamma[s][0], s, 0) for each s. (The heap has at it's root the triple (gamma, s, i) for which gamma is least.) 
(In the special case that numchanges[s] = 0 for some s, we obviously cannot have (gamma[s][0], s, 0) to the heap since there is no gamma[s][0].) ii) At each iteration, remove the top, (gamma, s, i), of the heap (the next lowest gamma) and if i+1 < numchanges[s], add (gamma[s][i+1], s, i+1) to the heap. iii) Repeat (ii) until the heap is empty. Our heap is contained in the array heappoints. */ double maxscore(0.0f); // Will hold the best BLEU score double maxgamma(0.0f); // Will hold the gamma which produces the best BLEU score if (heapsize == 0) { // Special situation: no matter what gamma is, the BLEU score is // the same. maxscore = curScoreStats.score(); maxgamma = 0; // TODO: is 0 appropriate? I think so // cerr << "score at \\gamma = 0: " << curScoreStats.score() << endl; } else { // Create the heap make_heap(heappoints, heappoints + heapsize, linemaxpt::greater); maxscore = curScoreStats.score(); maxgamma = heappoints[0]->gamma - SMALL; double oldgamma(0.0f); // Holds the left endpoint of the interval whose stats are in curScoreStats while (true) { // Put max element at end of heap pop_heap(heappoints, heappoints + heapsize, linemaxpt::greater); // Update BLEU statistics curScoreStats += scoreWorkSpace[heappoints[heapsize - 1]->s][heappoints[heapsize - 1]->i]; // Save left endpoint of the new interval oldgamma = heappoints[heapsize - 1]->gamma; heappoints[heapsize - 1]->i++; // Determine whether there is a new point to add to the heap // (same s, but i increases) if (heappoints[heapsize - 1]->i < numchanges[heappoints[heapsize - 1]->s]) { // Add point (gamma[s][i], s, i) to the heap heappoints[heapsize - 1]->gamma = gammaWorkSpace[heappoints[heapsize - 1]->s][heappoints[heapsize - 1]->i]; push_heap(heappoints, heappoints + heapsize, linemaxpt::greater); } else { // Decrease heap size delete heappoints[heapsize - 1]; --heapsize; } // if // Exit loop if there are no new points (heap is empty) if (heapsize == 0) { break; } // for // Determine the BLEU score for 
the interval (oldgamma, heappoints[0]->gamma). const double curscore = curScoreStats.score(); // Determine if this is the new best score AND if the interval is non-empty if (curscore > maxscore && heappoints[0]->gamma != oldgamma) { // New best score maxscore = curscore; // Use the midpoint of this range. maxgamma = (heappoints[0]->gamma + oldgamma) / 2; } if (record_history) history.push_back(make_pair((heappoints[0]->gamma + oldgamma) / 2, curscore)); } // Consider final score const double curscore = curScoreStats.score(); if (curscore > maxscore) { // New best score maxscore = curscore; maxgamma = oldgamma + SMALL; } } // Return values appropriately. dir *= maxgamma; // dir = (maxgamma - 1) * dir + dir = maxgamma * dir p += dir; } // ends LineMax<ScoreStats>::operator() } // ends namespace Portage
GB_binop__islt_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__islt_int32 // A.*B function (eWiseMult): GB_AemultB__islt_int32 // A*D function (colscale): GB_AxD__islt_int32 // D*A function (rowscale): GB_DxB__islt_int32 // C+=B function (dense accum): GB_Cdense_accumB__islt_int32 // C+=b function (dense accum): GB_Cdense_accumb__islt_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__islt_int32 // C=scalar+B GB_bind1st__islt_int32 // C=scalar+B' GB_bind1st_tran__islt_int32 // C=A+scalar GB_bind2nd__islt_int32 // C=A'+scalar GB_bind2nd_tran__islt_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] 
#define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x < y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLT || GxB_NO_INT32 || GxB_NO_ISLT_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__islt_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__islt_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__islt_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__islt_int32 ( GrB_Matrix C, const GrB_Matrix A, 
bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__islt_int32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__islt_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, 
*klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__islt_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__islt_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd 
//------------------------------------------------------------------------------ GrB_Info GB_bind2nd__islt_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB_bind1st_tran__islt_int32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB_bind2nd_tran__islt_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
DRB084-threadprivatemissing-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
 * DRB084: a file-scope (shared) variable is updated inside a function that is
 * called from a parallel region.  The "#pragma omp threadprivate(sum0)" that
 * would give each thread its own copy of sum0 is deliberately left commented
 * out, so this kernel INTENTIONALLY contains a data race (it is a
 * DataRaceBench "yes" test case) -- do not "fix" the race.
 *
 * Data race pairs  sum0@61:3 vs. sum0@61:8
 *                  sum0@61:3 vs. sum0@61:3
 */
#include <stdio.h>
#include <assert.h>

/* Shared accumulators: sum0 is the racy one, sum1 is the serial reference. */
int sum0=0, sum1=0;
//#pragma omp threadprivate(sum0)  /* the missing fix: would privatize sum0 */

/* Adds i into the file-scope sum0; racy when called concurrently. */
void foo (int i)
{
  sum0=sum0+i;
}

int main()
{
  int i, sum=0;
#pragma omp parallel
  {
#pragma omp for schedule(dynamic)
    for (i=1;i<=1000;i++)
    {
      foo (i);
    }
    /* Every thread adds the (shared) sum0 once; combined with the race in
     * foo() this makes the final sum nondeterministic. */
#pragma omp critical
    {
      sum= sum+sum0;
    }
  }

  /* reference calculation (serial, race-free) */
  for (i=1;i<=1000;i++)
  {
    sum1=sum1+i;
  }

  printf("sum=%d; sum1=%d\n",sum,sum1);
//  assert(sum==sum1);  /* disabled: the race makes sum unreliable */
  return 0;
}
support.c
#include <stdlib.h> #include <stdio.h> #include <time.h> #include <omp.h> #include "support.h" bool isEqualArray(int* arr1, int *arr2, int size) { bool isResultCorrect = true; int i = 0; while((isResultCorrect == true) && (i < size)) { if(arr1[i] != arr2[i]) { isResultCorrect = false; } i++; } return isResultCorrect; } void initializeArrays(int* arr1, int* arr2, int* arr3, int size, int minimumNumber, int maximumNumber) { int i; srand(time(NULL)); printf("Initializing the arrays...\n"); #pragma omp parallel for for (i = 0; i < size; i++) { arr1[i] = minimumNumber + (rand() % maximumNumber); arr2[i] = arr1[i]; arr3[i] = arr1[i]; } printf("Complete.\n"); } void copyArray(int* src, int* dst, int size) { int i; #pragma omp parallel for for (i = 0; i < size; i++) { dst[i] = src[i]; } }
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 4; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
locate.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <math.h>
#include <omp.h>
#include <lapacke_utils.h>
#include <mpi.h>

/* True (1) when POINTER sits on a BYTE_COUNT-byte boundary. */
#define memory_isAligned(POINTER, BYTE_COUNT) \
    (((uintptr_t)(const void *)(POINTER)) % (BYTE_COUNT) == 0)

/* ---------------- forward declarations ---------------- */
int locate_l2_gridSearch__double64(const int ldgrd, const int ngrd,
                                   const int nobs, const int iwantOT,
                                   const double t0use,
                                   const int *__restrict__ mask,
                                   const double *__restrict__ tobs,
                                   const double *__restrict__ tcorr,
                                   const double *__restrict__ varobs,
                                   const double *__restrict__ test,
                                   double *__restrict__ t0,
                                   double *__restrict__ objfn);
int locate_l2_gridSearch__float64(const int ldgrd, const int ngrd,
                                  const int nobs, const int iwantOT,
                                  const float t0use,
                                  const int *__restrict__ mask,
                                  const float *__restrict__ tobs,
                                  const float *__restrict__ tcorr,
                                  const float *__restrict__ varobs,
                                  const float *__restrict__ test,
                                  float *__restrict__ t0,
                                  float *__restrict__ objfn);
int locate_l1_gridSearch__double64(const int ldgrd, const int ngrd,
                                   const int nobs, const int iwantOT,
                                   const double t0use,
                                   const int *__restrict__ mask,
                                   const double *__restrict__ tobs,
                                   const double *__restrict__ varobs,
                                   const double *__restrict__ test,
                                   double *__restrict__ t0,
                                   double *__restrict__ objfn);
static void locate_l2_stackT0__double64(const int ngrd, const double tobs_i,
                                        const double xnorm, const double wt_i,
                                        const double *__restrict__ test,
                                        double *__restrict__ t0);
static void locate_l2_stackT0__float64(const int ngrd, const float tobs_i,
                                       const float xnorm, const float wt_i,
                                       const float *__restrict__ test,
                                       float *__restrict__ t0);
static void locate_l2_stackObjfn__double64(const int ngrd, const double tobs_i,
                                           const double wt_i,
                                           const double *__restrict__ test,
                                           const double *__restrict__ t0,
                                           double *__restrict__ objfn);
static void locate_l2_stackObjfn__float64(const int ngrd, const float tobs_i,
                                          const float wt_i,
                                          const float *__restrict__ test,
                                          const float *__restrict__ t0,
                                          float *__restrict__ objfn);
/* Defined in another translation unit. */
double weightedMedian__double(const int n, const double *__restrict__ x,
                              const double *__restrict__ w,
                              int *__restrict__ perm, bool *lsort, int *ierr);
void locate_setDouble64(const int n, const double x0in, double *__restrict__ x);
void locate_setFloat64(const int n, const float x0in, float *__restrict__ x);
static void locate_nullDouble64(const int n, double *__restrict__ x);
void locate_nullFloat64(const int n, float *__restrict__ x);
double locate_sumDouble64(const int n, const double *__restrict__ x);
float locate_sumFloat64(const int n, const float *__restrict__ x);
int locate_sumInt(const int n, const int *__restrict__ x);
int locate_minLocDouble64(const int n, const double *__restrict__ x);
int locate_minLocFloat64(const int n, const float *__restrict__ x);
int memory_malloc__double(const int n, double **x);
int memory_malloc__float(const int n, float **x);
int memory_malloc__int(const int n, int **x);
double *memory_calloc__double(const int n);
float *memory_calloc__float(const int n);
int *memory_calloc__int(const int n);
void makeTest(const int nx, const int ny, const int nz,
              const double dx, const double dy, const double dz,
              const double xsrc, const double ysrc, const double zsrc,
              double *__restrict__ test);
double makeObs(const double x, const double y, const double z,
               const double xsrc, const double ysrc, const double zsrc);

/* NOTE(review): unused scaffold entry point; also, this file defines main()
 * below while the benchmark concatenated above defines its own main() --
 * the two translation units cannot be linked together as-is. */
int main2()
{
    return 0;
}

/*
 * Synthetic end-to-end test of the grid-search locators: builds random
 * receivers and travel-time tables on a 145x145x45 grid, locates in L1 and
 * L2 at double and float precision, then exercises weightedMedian__double.
 */
int main()
{
    double *objfn, *xrec, *yrec, *zrec, *t0, *tcorr, *test, *tobs, *varobs;
    float *objfn4, *t04, *tcorr4, *test4, *tobs4, *varobs4;
    int *mask;
    double dx, dy, dz, t0use, xsrc, ysrc, zsrc;
    float t0use4;
    int igrd, iobs, iopt, ixsrc, iysrc, izsrc, iwantOT, ldgrd, ngrd,
        nobs, nx, ny, nz;
    bool lsort;
    srand(4042);
    nobs = 20;
    iwantOT = 1;     /* estimate the origin time analytically */
    t0use = 4.0;     /* true origin time added to the synthetic picks */
    dx = 1.e3;
    dy = 1.e3;
    dz = 1.e3;
    nx = 145;
    ny = 145;
    nz = 45;
    ngrd = nx*ny*nz;
    /* Pad the leading dimension up to a multiple of 64 elements so each
       observation's travel-time table starts 64-byte aligned. */
    ldgrd = ngrd + 64 - ngrd%64;
    ixsrc = 12;
    iysrc = 15;
    izsrc = 5;
    printf("true: %d\n", izsrc*nx*ny + iysrc*nx + ixsrc);
    xsrc = (double) ixsrc*dx;
    ysrc = (double) iysrc*dy;
    zsrc = (double) izsrc*dz;
    xrec = memory_calloc__double(nobs);
    yrec = memory_calloc__double(nobs);
    zrec = memory_calloc__double(nobs);
    tobs = memory_calloc__double(nobs);
    tcorr = memory_calloc__double(nobs);
    varobs = memory_calloc__double(nobs);
    test = memory_calloc__double(nobs*ldgrd);
    mask = (int *)calloc((size_t) nobs, sizeof(int));
    // Make the receiver locations
    for (iobs=0; iobs<nobs; iobs++)
    {
        /* Uniform random receiver inside the grid volume. */
        xrec[iobs] = ((double) (rand()))/RAND_MAX;
        yrec[iobs] = ((double) (rand()))/RAND_MAX;
        zrec[iobs] = ((double) (rand()))/RAND_MAX;
        xrec[iobs] = xrec[iobs]*(double) (nx - 1)*dx;
        yrec[iobs] = yrec[iobs]*(double) (ny - 1)*dy;
        zrec[iobs] = zrec[iobs]*(double) (nz - 1)*dz;
        tobs[iobs] = makeObs(xrec[iobs], yrec[iobs], zrec[iobs],
                             xsrc, ysrc, zsrc);
        varobs[iobs] = ((double) (rand()))/RAND_MAX; //0.2;
        makeTest(nx, ny, nz, dx, dy, dz,
                 xrec[iobs], yrec[iobs], zrec[iobs], &test[ldgrd*iobs]);
    }
    // Add the origin time
    if (iwantOT == 1)
    {
        for (iobs=0; iobs<nobs; iobs++)
        {
            tobs[iobs] = tobs[iobs] + t0use;
        }
    }
    t0 = memory_calloc__double(ngrd);
    objfn = memory_calloc__double(ngrd);
    locate_l1_gridSearch__double64(ldgrd, ngrd, nobs, iwantOT, t0use,
                                   mask, tobs, varobs, test, t0, objfn);
    iopt = locate_minLocDouble64(ngrd, objfn);
    printf("l1 first estimate: %d %f\n", iopt, t0[iopt]);
    locate_l2_gridSearch__double64(ldgrd, ngrd, nobs, iwantOT, t0use,
                                   mask, tobs, tcorr, varobs, test,
                                   t0, objfn);
    iopt = locate_minLocDouble64(ngrd, objfn);
    printf("double estimate: %d %f\n", iopt, t0[iopt]);
    locate_l1_gridSearch__double64(ldgrd, ngrd, nobs, iwantOT, t0use,
                                   mask, tobs, varobs, test, t0, objfn);
    iopt = locate_minLocDouble64(ngrd, objfn);
    printf("double l1 testimate: %d %f\n", iopt, t0[iopt]);
    free(objfn);
    free(t0);
    // Set the float problem
    t0use4 = (float) t0use;
    tobs4 = memory_calloc__float(nobs);
    tcorr4 = memory_calloc__float(nobs);
    varobs4 = memory_calloc__float(nobs);
    test4 = memory_calloc__float(ldgrd*nobs);
    for (iobs=0; iobs<nobs; iobs++)
    {
        tobs4[iobs] = (float) tobs[iobs];
        varobs4[iobs] = (float) varobs[iobs];
        for (igrd=0; igrd<ngrd; igrd++)
        {
            test4[ldgrd*iobs+igrd] = (float) test[ldgrd*iobs+igrd];
        }
    }
    free(test);
    free(tobs);
    free(tcorr);
    free(varobs);
    objfn4 = memory_calloc__float(ngrd);
    t04 = memory_calloc__float(ngrd);
    locate_l2_gridSearch__float64(ldgrd, ngrd, nobs, iwantOT, t0use4,
                                  mask, tobs4, tcorr4, varobs4, test4,
                                  t04, objfn4);
    iopt = locate_minLocFloat64(ngrd, objfn4);
    printf("float estimate: %d\n", iopt);
    free(test4);
    free(objfn4);
    free(t04);
    free(tobs4);
    free(tcorr4);
    free(varobs4);
    free(mask);
    free(xrec);
    free(yrec);
    free(zrec);
    /* Sanity checks for the weighted median (defined elsewhere). */
    double xs[7] = {0.1, 0.35, 0.05, 0.1, 0.15, 0.05, 0.2};
    double ws[7] = {0.1, 0.35, 0.05, 0.1, 0.15, 0.05, 0.2};
    double w1[7] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
    int perm[7];
    double wmed;
    int ierr;
    wmed = weightedMedian__double(7, xs, ws, perm, &lsort, &ierr);
    printf("%f\n", wmed);
    wmed = weightedMedian__double(7, xs, w1, perm, &lsort, &ierr);
    printf("%f\n", wmed);
    return 0;
}

/*
 * Fills test[nx*ny*nz] with constant-slowness (5 km/s) straight-ray travel
 * times from the point (xsrc,ysrc,zsrc) to every node of the regular grid
 * with spacings (dx,dy,dz); node (i,j,k) maps to index k*nx*ny + j*nx + i.
 */
void makeTest(const int nx, const int ny, const int nz,
              const double dx, const double dy, const double dz,
              const double xsrc, const double ysrc, const double zsrc,
              double *__restrict__ test)
{
    double d2, x, y, z;
    int i, igrd, j, k;
    const double slow = 1.0/5000.0;  /* slowness = 1/(5000 m/s) */
    for (k=0; k<nz; k++)
    {
        for (j=0; j<ny; j++)
        {
            for (i=0; i<nx; i++)
            {
                igrd = k*nx*ny + j*nx + i;
                x = (double) i*dx;
                y = (double) j*dy;
                z = (double) k*dz;
                d2 = pow(x-xsrc, 2) + pow(y-ysrc, 2) + pow(z-zsrc, 2);
                test[igrd] = sqrt(d2)*slow;
            }
        }
    }
    return;
}

/*
 * Returns the constant-slowness (5 km/s) straight-ray travel time from
 * (xsrc,ysrc,zsrc) to the receiver at (x,y,z), in seconds.
 */
double makeObs(const double x, const double y, const double z,
               const double xsrc, const double ysrc, const double zsrc)
{
    const double slow = 1.0/5000.0;
    double d2, tobs;
    d2 = pow(x-xsrc, 2) + pow(y-ysrc, 2) + pow(z-zsrc, 2);
    tobs = sqrt(d2)*slow;
    return tobs;
}

/*
 * NOTE(review): this routine appears half-finished and is currently broken:
 *  - lbackProj and ttimes are never used, and the work/workx*/
/* ... arrays are
 *    allocated zeroed but never written before being read, so every
 *    derivative below is 0, rho is 0, and acos(dzh/rho) is NaN;
 *  - ic is computed from (z3,y3,ix) then immediately overwritten with ix;
 *  - phi is computed but unused, and aoi is set from theta (the azimuth),
 *    which looks wrong -- presumably phi was intended.  TODO confirm intent
 *    before use.
 */
int computeAngles(const int nx, const int ny, const int nz,
                  const bool lbackProj, const double *__restrict__ ttimes,
                  double *__restrict__ az, double *__restrict__ aoi)
{
    int i, i1, i2, ic, igrd, ix, iy, iz, j, k, ngrdx, ngrdx3, nxy, y3, z3;
    double *work, *workxm, *workxp, *workym, *workyp, *workzm, *workzp,
           dxh, dxh2, dyh, dyh2, dzh, dzh2,
           fxmh, fxph, fymh, fyph, fzmh, fzph, phi, rho, theta;
    const int chunkx = 16;
    const double half = 0.5;
    const double pi180i = 180.0/M_PI;  /* radians -> degrees */
    ngrdx = 16;
    ngrdx3 = ngrdx*3;
    nxy = nx*ny;
    work = memory_calloc__double((chunkx+1)*9);
    workxp = memory_calloc__double(2*(chunkx+1));
    workyp = memory_calloc__double(2*(chunkx+1));
    workzp = memory_calloc__double(2*(chunkx+1));
    workxm = memory_calloc__double(chunkx+1);
    workym = memory_calloc__double(chunkx+1);
    workzm = memory_calloc__double(chunkx+1);
    for (k=1; k<nz-1; k++)
    {
        for (j=1; j<ny-1; j++)
        {
            for (i=1; i<nx-1; i=i+chunkx)
            {
                // prefetch the values in the grid
                i1 = i-1;
                i2 = MIN(nx-2, i+chunkx);
                // differentiate pre-fetched values
                for (z3=-1; z3<=1; z3++)
                {
                    for (y3=-1; y3<=1; y3++)
                    {
                        iz = k + z3;
                        iy = j + y3;
                        for (ix=i1; ix<i2; ix++)
                        {
                            igrd = iz*nxy + iy*nx + ix;
                            // central difference
                            ic = (z3 + 1)*ngrdx3 + (y3 + 1)*ngrdx + (ix - i1 + 1);
                            ic = ix;  /* NOTE(review): clobbers the index just computed */
                            fxph = half*(workxp[ic] + workxm[ic]); //half*(work[ic+1] + work[ic]);
                            //fxmh = half*(workx[ic] + workx[ic-4]); //half*(work[ic] + work[ic-1]);
                            fyph = half*(workyp[ic] + workym[ic]); //half*(work[ic+ngrdx] + work[ic]);
                            //fymh = half*(worky[ic] + worky[ic-4]); //half*(work[ic] + work[ic-ngrdx]);
                            fzph = half*(workzp[ic] + workzm[ic]); //half*(work[ic+ngrdx3] + work[ic]);
                            //fzph = half*(workz[ic] + workz[ic-4]); //half*(work[ic] + work[ic-ngrdx3]);
                            dxh = fxph;// - fxmh;
                            dyh = fyph;// - fymh;
                            dzh = fzph;// - fzmh;
                            dxh2 = dxh*dxh;
                            dyh2 = dyh*dyh;
                            dzh2 = dzh*dzh;
                            rho = sqrt(dxh2 + dyh2 + dzh2);
                            phi = acos(dzh/rho)*pi180i;  /* NOTE(review): rho==0 here -> NaN */
                            theta = atan2(dyh, dxh)*pi180i;
                            // the modeling coordinate system is right handed up
                            // while the observation coordinate system is + from
                            // north. hence, make +x 'north' and -y 'east'.
                            az[igrd] = 90.0 - theta;
                            if (az[igrd] < 0.0){az[igrd] = az[igrd] + 360.0;}
                            aoi[igrd] = theta - 180.0;
                        }
                    }
                }
            }
        }
    }
    free(work);
    free(workxm);
    free(workym);
    free(workzm);
    free(workxp);
    free(workyp);
    free(workzp);
    return 0;
}
/*!
 * @brief Stacks the weighted residuals into the analytic origin time
 *        computation.  This is for the analytic removal of the source
 *        time in a least squares optimization as described by Moser
 *        et. al. 1992 Eqn 19 for diagonally weighted matrices.
 *
 * @param[in] ngrd     number of grid points in the grid
 * @param[in] tobs_i   i'th observed pick time (seconds)
 * @param[in] xnorm    normalization factor: the sum of the data weights
 *                     over all usable observations (1/seconds); each
 *                     weight is divided by it below
 * @param[in] wt_i     i'th observed pick time weight (1/seconds)
 * @param[in] test     estimate arrival times (seconds) at all points in the
 *                     grid. [ngrd]
 *
 * @param[in,out] t0   on input contains the current weighted residual
 *                     sum for previous observations.
 *                     on output contains the updated weighted residual
 *                     sum which incorporates the current observation. [ngrd]
 *
 * @author Ben Baker
 *
 * @copyright Apache 2
 *
 */
static void locate_l2_stackT0__double64(const int ngrd, const double tobs_i,
                                        const double xnorm, const double wt_i,
                                        const double *__restrict__ test,
                                        double *__restrict__ t0)
{
    double wt __attribute__ ((aligned(64))) = 0.0;
    double tobs __attribute__ ((aligned(64))) = 0.0;
    int igrd __attribute__ ((aligned(64))) = 0;
    //------------------------------------------------------------------------//
    wt = wt_i/xnorm;   /* normalized weight so the stacked t0 is an average */
    tobs = tobs_i;
#ifdef __INTEL_COMPILER
    __assume_aligned(test, 64);
    __assume_aligned(t0, 64);
#else
    #pragma omp simd aligned(t0, test:64)
#endif
    for (igrd=0; igrd<ngrd; igrd++)
    {
        t0[igrd] = t0[igrd] + wt*(tobs - test[igrd]);
    }
    return;
}
//============================================================================//
/*!
 * @brief Stacks the weighted residuals into the analytic origin time
 *        computation.  This is for the analytic removal of the source
 *        time in a least squares optimization as described by Moser
 *        et. al. 1992 Eqn 19 for diagonally weighted matrices.
 *
 * @param[in] ngrd     number of grid points in the grid
 * @param[in] tobs_i   i'th observed pick time (seconds)
 * @param[in] xnorm    normalization factor: the sum of the data weights
 *                     over all usable observations (1/seconds); each
 *                     weight is divided by it below
 * @param[in] wt_i     i'th observed pick time weight (1/seconds)
 * @param[in] test     estimate arrival times (seconds) at all points in the
 *                     grid. [ngrd]
 *
 * @param[in,out] t0   on input contains the current weighted residual
 *                     sum for previous observations.
 *                     on output contains the updated weighted residual
 *                     sum which incorporates the current observation. [ngrd]
 *
 * @author Ben Baker
 *
 * @copyright Apache 2
 *
 */
static void locate_l2_stackT0__float64(const int ngrd, const float tobs_i,
                                       const float xnorm, const float wt_i,
                                       const float *__restrict__ test,
                                       float *__restrict__ t0)
{
    float wt __attribute__ ((aligned(64))) = 0.0f;
    float tobs __attribute__ ((aligned(64))) = 0.0f;
    int igrd __attribute__ ((aligned(64))) = 0;
    //------------------------------------------------------------------------//
    wt = wt_i/xnorm;   /* normalized weight so the stacked t0 is an average */
    tobs = tobs_i;
#ifdef __INTEL_COMPILER
    __assume_aligned(test, 64);
    __assume_aligned(t0, 64);
#else
    #pragma omp simd aligned(t0, test:64)
#endif
    for (igrd=0; igrd<ngrd; igrd++)
    {
        t0[igrd] = t0[igrd] + wt*(tobs - test[igrd]);
    }
    return;
}
//============================================================================//
/*!
 * @brief Stacks the squared residuals into the least squares penalty
 *        function.  This assumes diagonal weighting.
 *
 * @param[in] ngrd     number of points in grid search
 * @param[in] tobs_i   i'th observed pick time (seconds)
 * @param[in] wt_i     i'th observed pick time weight (1/seconds)
 * @param[in] test     estimate arrival times (seconds) at all points in the
 *                     grid. [ngrd]
 * @param[in] t0       origin time (seconds) at all points in grid [ngrd]
 *
 * @param[in,out] objfn on input contains the weighted squared residuals
 *                      at all points in the grid.
 *                      on output contains the contribution of this
 *                      observation to all the squared residuals at all
 *                      points in the grid. [ngrd]
 *
 * @author Ben Baker
 *
 * @copyright Apache 2
 *
 */
static void locate_l2_stackObjfn__double64(const int ngrd, const double tobs_i,
                                           const double wt_i,
                                           const double *__restrict__ test,
                                           const double *__restrict__ t0,
                                           double *__restrict__ objfn)
{
    double wt __attribute__ ((aligned(64))) = 0.0;
    double res __attribute__ ((aligned(64))) = 0.0;
    double tobs __attribute__ ((aligned(64))) = 0.0;
    int igrd __attribute__ ((aligned(64))) = 0;
    /* 1/sqrt(2): folding it into the weight makes the stacked objective
       (1/2)*sum (w*r)^2, the conventional least-squares penalty. */
    const double sqrt2i = 0.7071067811865475; //one/sqrt(two);
    //------------------------------------------------------------------------//
    wt = wt_i*sqrt2i;
    tobs = tobs_i;
#ifdef __INTEL_COMPILER
    __assume_aligned(test, 64);
    __assume_aligned(t0, 64);
    __assume_aligned(objfn, 64);
#else
    #pragma omp simd aligned(t0, test, objfn: 64)
#endif
    for (igrd=0; igrd<ngrd; igrd++)
    {
        res = wt*(tobs - (test[igrd] + t0[igrd]));
        objfn[igrd] = objfn[igrd] + res*res;
    }
    return;
}
//============================================================================//
/*!
 * @brief Stacks the squared residuals into the least squares penalty
 *        function.  This assumes diagonal weighting.
 *
 * @param[in] ngrd     number of points in grid search
 * @param[in] tobs_i   i'th observed pick time (seconds)
 * @param[in] wt_i     i'th observed pick time weight (1/seconds)
 * @param[in] test     estimate arrival times (seconds) at all points in the
 *                     grid. [ngrd]
 * @param[in] t0       origin time (seconds) at all points in grid [ngrd]
 *
 * @param[in,out] objfn on input contains the weighted squared residuals
 *                      at all points in the grid.
 *                      on output contains the contribution of this
 *                      observation to all the squared residuals at all
 *                      points in the grid.
 [ngrd]
 *
 * @author Ben Baker
 *
 * @copyright Apache 2
 *
 */
static void locate_l2_stackObjfn__float64(const int ngrd, const float tobs_i,
                                          const float wt_i,
                                          const float *__restrict__ test,
                                          const float *__restrict__ t0,
                                          float *__restrict__ objfn)
{
    float wt __attribute__ ((aligned(64))) = 0.0f;
    float res __attribute__ ((aligned(64))) = 0.0f;
    float tobs __attribute__ ((aligned(64))) = 0.0f;
    int igrd __attribute__ ((aligned(64))) = 0;
    /* 1/sqrt(2): folding it into the weight makes the stacked objective
       (1/2)*sum (w*r)^2, the conventional least-squares penalty. */
    const float sqrt2i = 0.7071067811865475f; //one/sqrt(two);
    //------------------------------------------------------------------------//
    wt = wt_i*sqrt2i;
    tobs = tobs_i;
#ifdef __INTEL_COMPILER
    __assume_aligned(test, 64);
    __assume_aligned(t0, 64);
    __assume_aligned(objfn, 64);
#else
    #pragma omp simd aligned(t0, test, objfn:64)
#endif
    for (igrd=0; igrd<ngrd; igrd++)
    {
        res = wt*(tobs - (test[igrd] + t0[igrd]));
        objfn[igrd] = objfn[igrd] + res*res;
    }
    return;
}
//============================================================================//
/* Allocates a 64-byte aligned double array of (at least) n elements and
 * zeros it.
 * NOTE(review): the return code of memory_malloc__double is ignored, so on
 * allocation failure the zeroing loop dereferences NULL -- TODO check ierr. */
double *memory_calloc__double(const int n)
{
    double *x = NULL;
    int i, ierr;
    const double zero __attribute__ ((aligned(64))) = 0.0;
    ierr = memory_malloc__double(n, &x);
#ifdef __INTEL_COMPILER
    __assume_aligned(x, 64);
#endif
    //memset(x, 0.0, (size_t) n*sizeof(double));
    for (i=0; i<n; i++)
    {
        x[i] = zero;
    }
    return x;
}
/* Float variant of memory_calloc__double; same NULL-on-failure caveat. */
float *memory_calloc__float(const int n)
{
    float *x = NULL;
    int i, ierr;
    const float zero __attribute__ ((aligned(64))) = 0.0f;
    ierr = memory_malloc__float(n, &x);
#ifdef __INTEL_COMPILER
    __assume_aligned(x, 64);
#endif
    //memset(x, 0.0f, (size_t) n*sizeof(float));
    for (i=0; i<n; i++)
    {
        x[i] = zero;
    }
    return x;
}
/* Int variant of memory_calloc__double; same NULL-on-failure caveat. */
int *memory_calloc__int(const int n)
{
    int *x = NULL;
    int i, ierr;
    const int zero __attribute__ ((aligned(64))) = 0;
    ierr = memory_malloc__int(n, &x);
#ifdef __INTEL_COMPILER
    __assume_aligned(x, 64);
#endif
    //memset(x, 0.0f, (size_t) n*sizeof(float));
    for (i=0; i<n; i++)
    {
        x[i] = zero;
    }
    return x;
}
/* 64-byte aligned allocation of n doubles.  The element count is rounded up
 * past the next multiple of 64 so nbytes is a multiple of the alignment, as
 * C11 aligned_alloc requires.  Returns 0 on success, 1 on failure. */
int memory_malloc__double(const int n, double **x)
{
    const char *fcnm = "memory_malloc__double\0";
    size_t nbytes;
    int ierr;
    ierr = 0;
    nbytes = (size_t) (n + 64 - n%64)*sizeof(double);
    *x = (double *) aligned_alloc(64, nbytes);
    if (*x == NULL)
    {
        printf("%s: Error allocating array\n", fcnm);
        ierr = 1;
    }
    return ierr;
}
/* Float variant of memory_malloc__double. */
int memory_malloc__float(const int n, float **x)
{
    const char *fcnm = "memory_malloc__float\0";
    size_t nbytes;
    int ierr;
    ierr = 0;
    nbytes = (size_t) (n + 64 - n%64)*sizeof(float);
    *x = (float *) aligned_alloc(64, nbytes);
    if (*x == NULL)
    {
        printf("%s: Error allocating array\n", fcnm);
        ierr = 1;
    }
    return ierr;
}
/* Int variant of memory_malloc__double. */
int memory_malloc__int(const int n, int **x)
{
    const char *fcnm = "memory_malloc__int\0";
    size_t nbytes;
    int ierr;
    ierr = 0;
    nbytes = (size_t) (n + 64 - n%64)*sizeof(int);
    *x = (int *) aligned_alloc(64, nbytes);
    if (*x == NULL)
    {
        printf("%s: Error allocating array\n", fcnm);
        ierr = 1;
    }
    return ierr;
}
//============================================================================//
/*!
 * @brief Sets all elements of a 64 bit aligned array x to x0in
 *
 * @param[in] n      number of points in array x
 * @param[in] x0in   value to set
 *
 * @param[out] x     all values of array set to x0 [n]
 *
 * @author Ben Baker
 *
 * @copyright Apache 2
 *
 */
void locate_setDouble64(const int n, const double x0in, double *__restrict__ x)
{
    double x0 __attribute__ ((aligned(64))) = x0in;
    int i __attribute__ ((aligned(64))) = 0;
#ifdef __INTEL_COMPILER
    __assume_aligned(x, 64);
#else
    #pragma omp simd aligned(x : 64)
#endif
    for (i=0; i<n; i++)
    {
        x[i] = x0;
    }
}
//============================================================================//
/*!
 * @brief Sets all elements of a 64 bit aligned array x to x0in
 *
 * @param[in] n      number of points in array x
 * @param[in] x0in   value to set
 *
 * @param[out] x     all values of array set to x0 [n]
 *
 * @author Ben Baker
 *
 * @copyright Apache 2
 *
 */
void locate_setFloat64(const int n, const float x0in, float *__restrict__ x)
{
    float x0 __attribute__ ((aligned(64))) = x0in;
    int i __attribute__ ((aligned(64))) = 0;
#ifdef __INTEL_COMPILER
    __assume_aligned(x, 64);
#else
    #pragma omp simd aligned(x : 64)
#endif
    for (i=0; i<n; i++)
    {
        x[i] = x0;
    }
}
//============================================================================//
/*!
 * @brief Zeros out a 64 bit aligned array x
 *
 * @param[in] n      number of points in array x
 *
 * @param[out] x     nulled out array [n]
 *
 * @author Ben Baker
 *
 * @copyright Apache 2
 *
 */
static void locate_nullDouble64(const int n, double *__restrict__ x)
{
    double zero __attribute__ ((aligned(64))) = 0.0;
    int i __attribute__ ((aligned(64))) = 0;
#ifdef __INTEL_COMPILER
    __assume_aligned(x, 64);
#else
    #pragma omp simd aligned(x : 64)
#endif
    for (i=0; i<n; i++)
    {
        x[i] = zero;
    }
    return;
}
//============================================================================//
/*!
 * @brief Zeros out a 64 bit aligned array x
 *
 * @param[in] n      number of points in array x
 *
 * @param[out] x     nulled out array [n]
 *
 * @author Ben Baker
 *
 * @copyright Apache 2
 *
 */
void locate_nullFloat64(const int n, float *__restrict__ x)
{
    float zero __attribute__ ((aligned(64))) = 0.0f;
    int i __attribute__ ((aligned(64))) = 0;
#ifdef __INTEL_COMPILER
    __assume_aligned(x, 64);
#else
    #pragma omp simd aligned(x : 64)
#endif
    for (i=0; i<n; i++)
    {
        x[i] = zero;
    }
    return;
}
//============================================================================//
/*!
 * @brief Sums all elements in an array
 *
 * @param[in] n   number of points in array to sum
 * @param[in] x   array to sum [n]
 *
 * @result sum of all elements in array x
 *
 * @author Ben Baker
 *
 * @copyright Apache 2
 *
 */
double locate_sumDouble64(const int n, const double *__restrict__ x)
{
    double xsum __attribute__ ((aligned(64))) = 0.0;
    int i __attribute__ ((aligned(64))) = 0;
#ifdef __INTEL_COMPILER
    __assume_aligned(x, 64);
#else
    #pragma omp simd aligned(x : 64) reduction(+:xsum)
#endif
    for (i=0; i<n; i++)
    {
        xsum = xsum + x[i];
    }
    return xsum;
}
//============================================================================//
/* Index of the smallest element of x; ties keep the first occurrence.
 * Precondition: n >= 1 (x[0] is read unconditionally). */
int locate_minLocDouble64(const int n, const double *__restrict__ x)
{
    double xmin __attribute__ ((aligned(64))) = 0.0;
    int imin __attribute__ ((aligned(64))) = 0;
    int i __attribute__ ((aligned(64))) = 0;
    xmin = x[0];
    imin = 0;
#ifdef __INTEL_COMPILER
    __assume_aligned(x, 64);
#endif
    for (i=1; i<n; i++)
    {
        if (x[i] < xmin)
        {
            imin = i;
            xmin = x[i];
        }
    }
    return imin;
}
//============================================================================//
/* Float variant of locate_minLocDouble64; same n >= 1 precondition. */
int locate_minLocFloat64(const int n, const float *__restrict__ x)
{
    float xmin __attribute__ ((aligned(64))) = 0.0f;
    int imin __attribute__ ((aligned(64))) = 0;
    int i __attribute__ ((aligned(64))) = 0;
    xmin = x[0];
    imin = 0;
#ifdef __INTEL_COMPILER
    __assume_aligned(x, 64);
#endif
    for (i=1; i<n; i++)
    {
        if (x[i] < xmin)
        {
            imin = i;
            xmin = x[i];
        }
    }
    return imin;
}
//============================================================================//
/*!
 * @brief Sums all elements in an array
 *
 * @param[in] n   number of points in array to sum
 * @param[in] x   array to sum [n]
 *
 * @result sum of all elements in array x
 *
 * @author Ben Baker
 *
 * @copyright Apache 2
 *
 */
float locate_sumFloat64(const int n, const float *__restrict__ x)
{
    float xsum __attribute__ ((aligned(64))) = 0.0f;
    int i __attribute__ ((aligned(64))) = 0;
#ifdef __INTEL_COMPILER
    __assume_aligned(x, 64);
#else
    #pragma omp simd aligned(x : 64) reduction(+:xsum)
#endif
    for (i=0; i<n; i++)
    {
        xsum = xsum + x[i];
    }
    return xsum;
}
//============================================================================//
/* Sum of an int array; no alignment requirement on x. */
int locate_sumInt(const int n, const int *__restrict__ x)
{
    int i, xsum;
    xsum = 0;
    for (i=0; i<n; i++)
    {
        xsum = xsum + x[i];
    }
    return xsum;
}
//============================================================================//
/*!
 * @brief Performs the least squares gridsearch.  If the origin time is
 *        desired then the locations correspond to an analytic integration
 *        of the origin time (e.g. Moser et al. 1992).
 *
 * @param[in] ldgrd    leading dimension of traveltime tables (>= ngrd)
 * @param[in] ngrd     number of grid points in traveltime tables
 * @param[in] nobs     number of observations
 * @param[in] iwantOT  if 1 then compute the origin time.
 *                     otherwise the origin time will be set by t0use.
 * @param[in] t0use    if iwantOT is not 1 then this is the source
 *                     origin time (seconds)
 * @param[in] mask     if the i'th observation is 1 then it is masked
 *                     from the location [nobs]
 * @param[in] tobs     observed pick times (seconds) [nobs]
 * @param[in] tcorr    static corrections (seconds) for stations [nobs].
 *                     if NULL then it is ignored and assumed all zero.
 * @param[in] varobs   variance in pick times (seconds) [nobs]
 * @param[in] test     traveltime tables for each observation [ldgrd x nobs]
 *                     with leading dimension ldgrd.
 *
 * @param[out] t0      origin time (seconds) at each point in grid search [ngrd]
 * @param[out] objfn   residual squared objective function at each point in
 *                     the grid [ngrd].
 *
 * @result 0 on success, 1 on an input error.
 *
 * @author Ben Baker
 *
 * @copyright Apache 2
 *
 */
int locate_l2_gridSearch__double64(const int ldgrd, const int ngrd,
                                   const int nobs, const int iwantOT,
                                   const double t0use,
                                   const int *__restrict__ mask,
                                   const double *__restrict__ tobs,
                                   const double *__restrict__ tcorr,
                                   const double *__restrict__ varobs,
                                   const double *__restrict__ test,
                                   double *__restrict__ t0,
                                   double *__restrict__ objfn)
{
    const char *fcnm = "locate_l2_gridSearch__double64\0";
    double *tobsCor, *wtUse;
    double xnorm __attribute__ ((aligned(64))) = 0.0;
    double tobs_i __attribute__ ((aligned(64))) = 0.0;
    double wt_i __attribute__ ((aligned(64))) = 0.0;
    const double zero = 0.0;
    const double one = 1.0;
    int *obsPtr, ibeg, ierr, iobs, jobs, nobsUse;
    //------------------------------------------------------------------------//
    //
    // Error checking
    ierr = 0;
    /* Note: the first test checks that ldgrd is a multiple of 64 BYTES
       (8 doubles), so each table row stays 64-byte aligned. */
    if ((sizeof(double)*(size_t) ldgrd)%64 != 0 || ldgrd < ngrd ||
        nobs < 1 || mask == NULL || tobs == NULL || varobs == NULL ||
        test == NULL || t0 == NULL || objfn == NULL)
    {
        if ((sizeof(double)*(size_t) ldgrd)%64 != 0)
        {
            printf("%s: Error ldgrd must be divisible by 64\n", fcnm);
        }
        if (ldgrd < ngrd){printf("%s: Error ldgrd < ngrd\n", fcnm);}
        if (mask == NULL){printf("%s: mask is null\n", fcnm);}
        if (tobs == NULL){printf("%s: tobs is null\n", fcnm);}
        if (varobs == NULL){printf("%s: varobs is null\n", fcnm);}
        if (test == NULL){printf("%s: test is null\n", fcnm);}
        if (t0 == NULL){printf("%s: t0 is null\n", fcnm);}
        if (objfn == NULL){printf("%s: objfn is null\n", fcnm);}
        ierr = 1;
        return ierr;
    }
    // Require the arrays be 64 bit aligned
    if (memory_isAligned(t0, 64) != 1 ||
        memory_isAligned(test, 64) != 1 ||
        memory_isAligned(objfn, 64) != 1)
    {
        printf("%s: Input arrays are not 64 bit aligned\n", fcnm);
        ierr = 1;
        return ierr;
    }
    // zero out the result
    locate_nullDouble64(ngrd, objfn);
    // Set the static corrections.  While it would seem more sensible
    // to add the correction to the estimate recall that we are ultimately
    // interested in residuals so we instead remove it from the observation
    // because t_obs - (t_est + t_stat) = t_obs - t_stat - t_est = t_cor - t_est
    // Compact the unmasked observations into contiguous scratch arrays and
    // accumulate xnorm = sum of weights (used to normalize the t0 stack).
    obsPtr = memory_calloc__int(nobs);
    tobsCor = memory_calloc__double(nobs);
    wtUse = memory_calloc__double(nobs);
    nobsUse = 0;
    xnorm = zero;
    if (tcorr == NULL)
    {
        for (iobs=0; iobs<nobs; iobs++)
        {
            if (mask[iobs] == 0)
            {
                tobsCor[nobsUse] = tobs[iobs];
                obsPtr[nobsUse] = iobs;
                wtUse[nobsUse] = one/varobs[iobs];
                xnorm = xnorm + wtUse[nobsUse]; //one/varobs[iobs];
                nobsUse = nobsUse + 1;
            }
        }
    }
    else
    {
        for (iobs=0; iobs<nobs; iobs++)
        {
            if (mask[iobs] == 0)
            {
                tobsCor[nobsUse] = tobs[iobs] - tcorr[iobs];
                obsPtr[nobsUse] = iobs;
                wtUse[nobsUse] = one/varobs[iobs];
                xnorm = xnorm + wtUse[nobsUse]; //one/varobs[iobs];
                nobsUse = nobsUse + 1;
            }
        }
    }
    // Compute the least-squares origin time which is the average residual
    if (iwantOT == 1)
    {
        locate_nullDouble64(ngrd, t0);
        for (jobs=0; jobs<nobsUse; jobs++)
        {
            iobs = obsPtr[jobs];
            tobs_i = tobsCor[jobs];
            wt_i = wtUse[jobs]; //one/varUse[jobs];
            ibeg = ldgrd*iobs;  /* start of this observation's table row */
            locate_l2_stackT0__double64(ngrd, tobs_i, xnorm, wt_i,
                                        &test[ibeg], t0);
        }
    }
    // Set the desired residual
    else
    {
        locate_setDouble64(ngrd, t0use, t0);
    }
    // Compute the locations with the origin times at each grid point
    for (jobs=0; jobs<nobsUse; jobs++)
    {
        iobs = obsPtr[jobs];
        tobs_i = tobsCor[jobs];
        wt_i = wtUse[jobs]; //one/varUse[jobs];
        ibeg = ldgrd*iobs;
        locate_l2_stackObjfn__double64(ngrd, tobs_i, wt_i,
                                       &test[ibeg], t0, objfn);
    }
    free(obsPtr);
    free(tobsCor);
    free(wtUse);
    return ierr;
}
//============================================================================//
/*!
 * @brief Performs the least squares gridsearch.  If the origin time is
 *        desired then the locations correspond to an analytic integration
 *        of the origin time (e.g. Moser et al. 1992).
* * @param[in] ldgrd leading dimension of traveltime tables (>= ngrd) * @param[in] ngrd number of grid points in traveltime tables * @param[in] nobs number of observations * @param[in] iwantOT if 1 then compute the origint time. * otherwise the origin time will be set by t0use. * @param[in] t0use if iwantOT is not when this this is the source * origin time (seconds) * @param[in] mask if the i'th observation is 1 then it is masked from * from the location [nobs] * @param[in] tobs observed pick times (seconds) [nobs] * @param[in] tcorr static corrections (seconds) for stations [nobs]. * if NULL then it is ignored and assumed all zero. * @param[in] varobs variance in pick times (seconds) [nobs] * @param[in] test traveltime tables for each observation [ldgrd x nobs] * with leading dimension ldgrd. * * @param[out] t0 origin time (seconds) at each point in grid serach [ngrd] * @param[out] objfn residual squared objective function at each point in * the grid [ngrd]. * * @author Ben Baker * * @copyright Apache 2 * */ int locate_l2_gridSearch__float64(const int ldgrd, const int ngrd, const int nobs, const int iwantOT, const float t0use, const int *__restrict__ mask, const float *__restrict__ tobs, const float *__restrict__ tcorr, const float *__restrict__ varobs, const float *__restrict__ test, float *__restrict__ t0, float *__restrict__ objfn) { const char *fcnm = "locate_l2_gridSearch__float64\0"; float *tobsCor, *wtUse; float xnorm __attribute__ ((aligned(64))) = 0.0f; float tobs_i __attribute__ ((aligned(64))) = 0.0f; float wt_i __attribute__ ((aligned(64))) = 0.0f; const float zero = 0.0f; const float one = 1.0f; int ibeg, ierr, iobs, jobs, nobsUse, *obsPtr; //------------------------------------------------------------------------// // // Error checking ierr = 0; if ((sizeof(float)*(size_t) ldgrd)%64 != 0 || ldgrd < ngrd || nobs < 1 || mask == NULL || tobs == NULL || varobs == NULL || test == NULL || t0 == NULL || objfn == NULL) { if ((sizeof(float)*(size_t) ldgrd)%64 
!= 0) { printf("%s: Error ldgrd must be divisible by 64\n", fcnm); } if (ldgrd < ngrd){printf("%s: Error ldgrd < ngrd\n", fcnm);} if (mask == NULL){printf("%s: mask is null\n", fcnm);} if (tobs == NULL){printf("%s: tobs is null\n", fcnm);} if (varobs == NULL){printf("%s: varobs is null\n", fcnm);} if (test == NULL){printf("%s: test is null\n", fcnm);} if (t0 == NULL){printf("%s: t0 is null\n", fcnm);} if (objfn == NULL){printf("%s: objfn is null\n", fcnm);} ierr = 1; return ierr; } // Require the arrays be 64 bit aligned if (memory_isAligned(t0, 64) != 1 || memory_isAligned(test, 64) != 1 || memory_isAligned(objfn, 64) != 1) { printf("%s: Input arrays are not 64 bit aligned\n", fcnm); ierr = 1; return ierr; } // zero out the result locate_nullFloat64(ngrd, objfn); // Set the static corrections. While it would seem more sensible // to add the correction to the estimate recall that we are ultimately // interested in residuals so we instead remove it from the observation // because t_obs - (t_est + t_stat) = t_obs - t_stat - t_est = t_cor - t_est obsPtr = memory_calloc__int(nobs); tobsCor = memory_calloc__float(nobs); wtUse = memory_calloc__float(nobs); nobsUse = 0; xnorm = zero; if (tcorr == NULL) { for (iobs=0; iobs<nobs; iobs++) { if (mask[iobs] == 0) { tobsCor[nobsUse] = tobs[iobs]; obsPtr[nobsUse] = iobs; wtUse[nobsUse] = one/varobs[iobs]; xnorm = xnorm + wtUse[nobsUse]; //one/varobs[iobs]; nobsUse = nobsUse + 1; } } } else { for (iobs=0; iobs<nobs; iobs++) { if (mask[iobs] == 0) { tobsCor[nobsUse] = tobs[iobs] - tcorr[iobs]; obsPtr[nobsUse] = iobs; wtUse[nobsUse] = one/varobs[iobs]; xnorm = xnorm + wtUse[nobsUse]; //one/varobs[iobs]; nobsUse = nobsUse + 1; } } } // Compute the least-squares origin time which is the average reisdual if (iwantOT == 1) { locate_nullFloat64(ngrd, t0); for (jobs=0; jobs<nobsUse; jobs++) { iobs = obsPtr[jobs]; tobs_i = tobsCor[jobs]; wt_i = wtUse[jobs]; //one/varUse[jobs]; ibeg = ldgrd*iobs; locate_l2_stackT0__float64(ngrd, tobs_i, 
xnorm, wt_i, &test[ibeg], t0); } } // Set the desired residual else { locate_setFloat64(ngrd, t0use, t0); } // Compute the locations with the origin times at each grid point for (jobs=0; jobs<nobsUse; jobs++) { iobs = obsPtr[jobs]; tobs_i = tobsCor[jobs]; wt_i = wtUse[jobs]; //one/varUse[jobs]; ibeg = ldgrd*iobs; locate_l2_stackObjfn__float64(ngrd, tobs_i, wt_i, &test[ibeg], t0, objfn); } free(obsPtr); free(tobsCor); free(wtUse); return ierr; } //============================================================================// int locate_l1_gridSearch__double64(const int ldgrd, const int ngrd, const int nobs, const int iwantOT, const double t0use, const int *__restrict__ mask, const double *__restrict__ tobs, const double *__restrict__ varobs, const double *__restrict__ test, double *__restrict__ t0, double *__restrict__ objfn) { double *testPerm, *tobsUse, *wt, *wtSort, *res;//, *resSort; double rsum __attribute__ ((aligned(64))) = 0.0; double t0opt __attribute__ ((aligned(64))) = 0.0; double tobs_i __attribute__ ((aligned(64))) = 0.0; double wt_i __attribute__ ((aligned(64))) = 0.0; const double one __attribute__ ((aligned(64))) = 1.0; const double zero __attribute__ ((aligned(64))) = 0.0; double wtsum, wtsumi; int *obsPtr, *perm, ierr, igrd, igrd1, igrd2, iobs, indx, jgrd, jndx, jobs, ldobs, nobsUse, nsort; bool lsort; const int nchunk = 256; ierr = 0; perm = memory_calloc__int(nobs); obsPtr = memory_calloc__int(nobs); tobsUse = memory_calloc__double(nobs); wt = memory_calloc__double(nobs); wtsum = 0.0; nobsUse = 0; for (iobs=0; iobs<nobs; iobs++) { if (mask[iobs] == 0) { wt[nobsUse] = one/varobs[iobs]; tobsUse[nobsUse] = tobs[iobs]; obsPtr[iobs] = iobs; nobsUse = nobsUse + 1; } wtsum = wtsum + wt[iobs]; } ldobs = nobsUse + 8 - nobsUse%8; locate_nullDouble64(ngrd, objfn); if (iwantOT == 1) { lsort = false; nsort = 0; // Zero out the origin time locate_nullDouble64(ngrd, t0); // Set space res = memory_calloc__double(nobsUse); //resSort = 
memory_calloc__double(nobsUse); wtSort = memory_calloc__double(nobsUse); // Normalize the weights? wtsumi = one/wtsum; if (fabs(wtsum - one) > 1.e-14) { for (iobs=0; iobs<nobsUse; iobs++) { wt[iobs] = wt[iobs]*wtsumi; } } for (iobs=0; iobs<nobsUse; iobs++){perm[iobs] = iobs;} /* #pragma omp parallel for \ firstprivate (obsPtr, perm, res, tobsUse, wt, wtSort) \ private (ierr, igrd, igrd1, igrd2, iobs, jgrd, jndx, jobs, lsort, rsum, t0opt) \ shared (nchunk, ngrd, nobsUse, objfn, t0, test, zero) \ default (none) reduction (+:nsort) */ // Loop on chunks for (jgrd=0; jgrd<ngrd; jgrd=jgrd+nchunk) { igrd1 = jgrd; igrd2 = MIN(ngrd, jgrd+nchunk); // Loop on the subgrid for (igrd=igrd1; igrd<igrd2; igrd++) { // Extract the estimates for (iobs=0; iobs<nobsUse; iobs++) { jobs = obsPtr[iobs]; jndx = jobs*ldgrd + igrd; res[iobs] = tobsUse[iobs] - test[jndx]; wtSort[iobs] = wt[iobs]; } t0opt = weightedMedian__double(nobsUse, res, wtSort, perm, &lsort, &ierr); if (lsort){nsort = nsort + 1;} t0[igrd] = t0opt; } } free(res); printf("%d %d\n", nsort, ngrd); } // Simply set the origin time to t0 else { locate_setDouble64(ngrd, t0use, t0); } // Compute the L1 misfit function at all points - loop on chunks #pragma omp parallel for \ private (igrd, igrd1, igrd2, iobs, jgrd, jobs, jndx, tobs_i, wt_i) \ shared (ldgrd, nchunk, nobsUse, objfn, obsPtr, test, t0, tobsUse, wt) \ default (none) for (jgrd=0; jgrd<ngrd; jgrd=jgrd+nchunk) { igrd1 = jgrd; igrd2 = MIN(ngrd, jgrd+nchunk); // Compute the L1 norm for all observations for (iobs=0; iobs<nobsUse; iobs++) { jobs = obsPtr[iobs]; tobs_i = tobsUse[iobs]; wt_i = wt[iobs]; // Extract the estimate at each grid point in chunk for (igrd=igrd1; igrd<igrd2; igrd++) { jndx = jobs*ldgrd + igrd; objfn[igrd] = objfn[igrd] + wt_i*fabs(tobs_i - test[jndx] - t0[igrd]); } } } //free(testPerm); free(wt); free(perm); return ierr; }
GB_binop__isle_int8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): generated kernels for the ISLE ("is less than or equal")
// operator on int8: z = (x <= y), with the boolean result stored as int8_t
// (0 or 1).  The function bodies are produced by #including shared template
// files; fix behavior in Generator/*, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isle_int8)
// A.*B function (eWiseMult):       GB (_AemultB_08__isle_int8)
// A.*B function (eWiseMult):       GB (_AemultB_02__isle_int8)
// A.*B function (eWiseMult):       GB (_AemultB_04__isle_int8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isle_int8)
// A*D function (colscale):         GB (_AxD__isle_int8)
// D*A function (rowscale):         GB (_DxB__isle_int8)
// C+=B function (dense accum):     GB (_Cdense_accumB__isle_int8)
// C+=b function (dense accum):     GB (_Cdense_accumb__isle_int8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isle_int8)
// C=scalar+B                       GB (_bind1st__isle_int8)
// C=scalar+B'                      GB (_bind1st_tran__isle_int8)
// C=A+scalar                       GB (_bind2nd__isle_int8)
// C=A'+scalar                      GB (_bind2nd_tran__isle_int8)

// C type:   int8_t
// A type:   int8_t
// A pattern? 0
// B type:   int8_t
// B pattern? 0

// BinaryOp: cij = (aij <= bij)

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    int8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    int8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x <= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISLE || GxB_NO_INT8 || GxB_NO_ISLE_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISLE is not in that list, so this variant is compiled out.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__isle_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isle_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isle_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isle_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isle_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isle_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read by the template when is_eWiseUnion is true
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__isle_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isle_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isle_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isle_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isle_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isle_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = (x <= aij) ;                      \
}

GrB_Info GB (_bind1st_tran__isle_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = (aij <= y) ;                      \
}

GrB_Info GB (_bind2nd_tran__isle_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
RelativeNeighborhoodGraph.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#ifndef _SPTAG_COMMON_RNG_H_
#define _SPTAG_COMMON_RNG_H_

#include "NeighborhoodGraph.h"

namespace SPTAG
{
    namespace COMMON
    {
        // Neighborhood graph whose edge lists are pruned with the relative
        // neighborhood graph (RNG) criterion: a candidate neighbor is kept
        // only if no already-kept neighbor is at least as close to the
        // candidate as the candidate is to the node itself.
        class RelativeNeighborhoodGraph: public NeighborhoodGraph
        {
        public:
            // Rebuild the neighbor list of 'node' from a query result list.
            // queryResults is presumably sorted by increasing distance
            // (callers pass a sorted QueryResultSet) -- TODO confirm.
            // Writes up to m_iNeighborhoodSize ids into 'nodes'; unused
            // slots are padded with -1.
            void RebuildNeighbors(VectorIndex* index, const SizeType node, SizeType* nodes, const BasicResult* queryResults, const int numResults)
            {
                DimensionType count = 0;
                for (int j = 0; j < numResults && count < m_iNeighborhoodSize; j++)
                {
                    const BasicResult& item = queryResults[j];
                    if (item.VID < 0) break;        // end of valid results
                    if (item.VID == node) continue; // never link a node to itself

                    // RNG test: reject item if some accepted neighbor is at
                    // least as close to item as item is to 'node'
                    bool good = true;
                    for (DimensionType k = 0; k < count; k++)
                    {
                        if (index->ComputeDistance(index->GetSample(nodes[k]), index->GetSample(item.VID)) <= item.Dist)
                        {
                            good = false;
                            break;
                        }
                    }
                    if (good) nodes[count++] = item.VID;
                }
                // pad the remaining slots with -1 (meaning: no neighbor)
                for (DimensionType j = count; j < m_iNeighborhoodSize; j++) nodes[j] = -1;
            }

            // Insert 'insertNode' (at distance 'insertDist' from 'node') into
            // node's neighbor list, preserving the distance ordering and the
            // RNG property.  Serialized by m_dataUpdateLock.
            void InsertNeighbors(VectorIndex* index, const SizeType node, SizeType insertNode, float insertDist)
            {
                std::lock_guard<std::mutex> lock(m_dataUpdateLock);
                SizeType* nodes = m_pNeighborhoodGraph[node];
                SizeType tmpNode;
                float tmpDist;
                for (DimensionType k = 0; k < m_iNeighborhoodSize; k++)
                {
                    tmpNode = nodes[k];
                    // NOTE(review): ids below -1 appear to act as a hard
                    // end-of-list marker (distinct from the -1 padding) --
                    // TODO confirm against NeighborhoodGraph's conventions
                    if (tmpNode < -1) break;

                    // insertion point: an empty slot, a strictly farther
                    // neighbor, or an equal-distance neighbor with larger id
                    // (ties broken by id to keep the order deterministic)
                    if (tmpNode < 0 || (tmpDist = index->ComputeDistance(index->GetSample(node), index->GetSample(tmpNode))) > insertDist || (insertDist == tmpDist && insertNode < tmpNode))
                    {
                        // RNG check against the closer neighbors already kept
                        bool good = true;
                        for (DimensionType t = 0; t < k; t++)
                        {
                            if (index->ComputeDistance(index->GetSample(insertNode), index->GetSample(nodes[t])) < insertDist)
                            {
                                good = false;
                                break;
                            }
                        }
                        if (good)
                        {
                            nodes[k] = insertNode;
                            // push the displaced neighbor (and any followers)
                            // one slot down, but only while they still pass
                            // the RNG test against the newly inserted node
                            while (tmpNode >= 0 && ++k < m_iNeighborhoodSize && nodes[k] >= -1 && index->ComputeDistance(index->GetSample(tmpNode), index->GetSample(insertNode)) >= index->ComputeDistance(index->GetSample(node), index->GetSample(tmpNode)))
                            {
                                std::swap(tmpNode, nodes[k]);
                            }
                        }
                        break;
                    }
                }
            }

            // Estimate graph quality by sampling 'samples' random nodes,
            // computing their exact RNG neighbor lists by brute force, and
            // measuring the overlap with the stored lists.  Returns the
            // average fraction of matching neighbor slots in [0, 1].
            float GraphAccuracyEstimation(VectorIndex* index, const SizeType samples, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
            {
                DimensionType* correct = new DimensionType[samples];

#pragma omp parallel for schedule(dynamic)
                for (SizeType i = 0; i < samples; i++)
                {
                    SizeType x = COMMON::Utils::rand(m_iGraphSize);
                    //int x = i;
                    COMMON::QueryResultSet<void> query(nullptr, m_iCEF);
                    // brute-force scan over the whole graph, skipping ids
                    // remapped in idmap (deleted/moved vectors)
                    for (SizeType y = 0; y < m_iGraphSize; y++)
                    {
                        if ((idmap != nullptr && idmap->find(y) != idmap->end())) continue;
                        float dist = index->ComputeDistance(index->GetSample(x), index->GetSample(y));
                        query.AddPoint(y, dist);
                    }
                    query.SortResult();

                    // ground-truth RNG list for node x from the exact results
                    SizeType * exact_rng = new SizeType[m_iNeighborhoodSize];
                    RebuildNeighbors(index, x, exact_rng, query.GetResults(), m_iCEF);

                    correct[i] = 0;
                    for (DimensionType j = 0; j < m_iNeighborhoodSize; j++)
                    {
                        if (exact_rng[j] == -1)
                        {
                            // the exact list ran out: count the tail slots as
                            // trivially correct
                            correct[i] += m_iNeighborhoodSize - j;
                            break;
                        }
                        for (DimensionType k = 0; k < m_iNeighborhoodSize; k++)
                            if ((m_pNeighborhoodGraph)[x][k] == exact_rng[j])
                            {
                                correct[i]++;
                                break;
                            }
                    }
                    delete[] exact_rng;
                }

                float acc = 0;
                for (SizeType i = 0; i < samples; i++) acc += float(correct[i]);
                acc = acc / samples / m_iNeighborhoodSize;
                delete[] correct;
                return acc;
            }
        };
    }
}
#endif
GB_unaryop__minv_fp64_int16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// NOTE(review): generated kernel for MINV (multiplicative inverse) applied
// to int16 input producing fp64 output: cij = 1./((double) aij).  An input
// of 0 therefore yields IEEE infinity rather than an error.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_fp64_int16
// op(A') function:  GB_tran__minv_fp64_int16

// C type:   double
// A type:   int16_t
// cast:     double cij = (double) aij
// unaryop:  cij = 1./aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = 1./x ;

// casting
#define GB_CASTING(z, aij) \
    double z = (double) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */          \
    GB_GETA (aij, Ax, pA) ;      \
    /* Cx [pC] = op (cast (aij)) */  \
    GB_CASTING (z, aij) ;        \
    GB_OP (GB_CX (pC), z) ;      \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_FP64 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_fp64_int16
(
    double *Cx,         // Cx and Ax may be aliased
    int16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_fp64_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
task_types_serialized.c
// RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
#include <omp.h>

// OMPT test: create each kind of task (initial, implicit, explicit,
// undeferred, untied, final) and verify the task-type flags reported by
// ompt_get_task_info against the FileCheck patterns below.
// Prints "<thread>: id=<id> task_type=<flags-as-text>=<flags>" for each probe.
void print_task_type(int id) {
#pragma omp critical
  {
    int task_type;
    char buffer[2048];
    ompt_get_task_info(0, &task_type, NULL, NULL, NULL, NULL);
    format_task_type(task_type, buffer);
    printf("%" PRIu64 ": id=%d task_type=%s=%d\n",
           ompt_get_thread_data()->value, id, buffer, task_type);
  }
};

int main() {
  //initial task
  print_task_type(0);

  // NOTE(review): x appears to exist only so each task body has a visible
  // side effect; it is never read, so the unsynchronized increments are
  // presumably intentional — confirm before "fixing".
  int x;
//implicit task
#pragma omp parallel num_threads(1)
  {
    print_task_type(1);
    x++;
  }

#pragma omp parallel num_threads(1)
#pragma omp master
  {
    //explicit task
#pragma omp task
    {
      print_task_type(2);
      x++;
    }

    //explicit task with undeferred
#pragma omp task if(0)
    {
      print_task_type(3);
      x++;
    }

    //explicit task with untied
#pragma omp task untied
    {
      print_task_type(4);
      x++;
    }

    //explicit task with final
#pragma omp task final(1)
    {
      print_task_type(5);
      x++;
      //nested explicit task with final and undeferred
#pragma omp task
      {
        print_task_type(6);
        x++;
      }
    }

/*
    //TODO:not working
    //explicit task with mergeable
#pragma omp task mergeable
    {
      print_task_type(7);
      x++;
    }
*/

    //TODO: merged task
  }

  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'

  // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]

  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_create: parent_task_id=0, parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=[[NULL]], new_task_id={{[0-9]+}}, codeptr_ra=[[NULL]], task_type=ompt_task_initial=1, has_dependences=no
  // CHECK: {{^}}[[MASTER_ID]]: id=0 task_type=ompt_task_initial=1

  // CHECK: {{^}}[[MASTER_ID]]: id=1 task_type=ompt_task_implicit|ompt_task_undeferred=134217730

  // CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred=134217732, has_dependences=no
  // CHECK: {{^[0-9]+}}: id=2 task_type=ompt_task_explicit|ompt_task_undeferred=134217732

  // CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred=134217732, has_dependences=no
  // CHECK: {{^[0-9]+}}: id=3 task_type=ompt_task_explicit|ompt_task_undeferred=134217732

  // CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_untied=402653188, has_dependences=no
  // CHECK: {{^[0-9]+}}: id=4 task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_untied=402653188

  // CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_final=671088644, has_dependences=no
  // CHECK: {{^[0-9]+}}: id=5 task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_final=671088644

  // CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_final=671088644, has_dependences=no
  // CHECK: {{^[0-9]+}}: id=6 task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_final=671088644

  // ___CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred=134217732, has_dependences=no
  // ___CHECK: {{^[0-9]+}}: id=7 task_type=ompt_task_explicit|ompt_task_undeferred=134217732

  return 0;
}
Molecule.h
/*
 * Molecule.h
 *
 *  Created on: 07/set/2014
 *      Author: sebastian
 */

#ifndef DESCRIPTOREVALUATION_BACKUP_SRC_MOLECULE_MOLECULE_H_
#define DESCRIPTOREVALUATION_BACKUP_SRC_MOLECULE_MOLECULE_H_

#include "../DockingMethods/DockingPose.h"
#include "../kdtree/kdtree.h"
#include "../MolecularSurface/MolecularSurface.h"
#include "../PQR/PQRModel.h"
#include "../PDB/PDBModel.h"
#include <string.h>
#include <chrono>
#include <ctime>
#include <cstdlib>      // std::rand, std::srand

// NOTE(review): `using namespace std;` in a header leaks into every includer;
// consider qualifying std:: names instead (left unchanged here).
using namespace std;

/**
 * A molecule built from PQR/PDB input, holding its computed molecular
 * surface, solvent-accessibility data and surface-patch descriptors used
 * for docking.
 */
class Molecule {
public:
	MolecularSurface * surface;
	PQRModel * pqrModel;
	PDBModel * pdbModel;

	uint16_t length, width, height; // bounding box dimensions
	point3D translation; // translation vector

	double SEV; /**< Solvent-Excluded Volume */
	double ASA; /**< Accessible Surface Area */
	double dG; /**< Solvation Energy */
	// Accessible surface area of each atom, parallel to the atom list.
	vector<double> perAtomASA;
	// For each atom, the sphere sample points found solvent-accessible.
	vector<vector<point3D>> per_atom_SA_points;

	vector<CompactPatchDescriptor> descriptors;
	vector<atom> const * atoms;
	vector<atom> outer_atoms;
	// kd-trees over atom sets for fast radius queries.
	kdtree_atom atoms_tree;
	kdtree_atom core_atoms_tree;
	kdtree_atom outer_atoms_tree;
	float max_atm_radius;
	float min_atm_radius;
	// Unit-sphere sample directions shared by the ASA computation
	// (lazily filled by generateSpherePoints).
	vector<point3D> sphere_points;
	int n_sphere_points;
	vector<voxel> interface_patch_centers, other_patch_centers;

	Molecule(float patchRadius, float probeRadius, float resolution,
			string const & inname, string const & outname,
			string const & inname_radii, int maxOrder, bool no_hydrogen,
			bool no_hetatm, bool receptor = true);
	virtual ~Molecule();

	void outputMoleculeDescriptors(string const & filename);
	void outputMoleculePQR(string const & filename);
	void outputMoleculePQR(string const & filename, DockingPose const & p);

	/**
	 * Extracts patch centers (non-interface first, then interface) and
	 * computes their surface descriptors, appending results to
	 * out_descriptors.  Non-interface descriptors that turn out to lie on
	 * the interface are dropped; interface descriptors are kept only when
	 * flagged as interface.
	 * @param outname                      output name (currently unused here)
	 * @param minCenterDist_noninterface   min spacing of non-interface centers
	 * @param minCenterDist_interface      min spacing of interface centers
	 * @param patchRadius                  geodesic patch radius
	 * @param maxOrder                     max descriptor expansion order
	 * @param interface                    voxel mask of the known interface
	 * @param out_descriptors              receives the computed descriptors
	 */
	void calculateDescriptors(string const & outname, float minCenterDist_noninterface, float minCenterDist_interface, float patchRadius, int maxOrder, array3D const & interface, vector<CompactPatchDescriptor> & out_descriptors) {
		auto t_start = chrono::high_resolution_clock::now();
		cout << "Extracting surface patches\n";
		surface->extractPatchCenters(minCenterDist_noninterface, other_patch_centers);
		cout << "Calculating Surface Descriptors\n";
		vector<CompactPatchDescriptor> int_d, nint_d;
		surface->calculateSurfaceDescriptors(patchRadius, maxOrder, other_patch_centers, interface, nint_d);
		size_t ip = 0, nip = 0;
		for (auto const & p : nint_d) {
			if (p.isInterface) {
				++ip;
			} else {
				++nip;
				out_descriptors.push_back(p);
			}
		}
		// NOTE(review): ratio is computed but never used (and divides by zero
		// when nip == 0) — presumably left over from debugging; confirm.
		double ratio = ip / (double) nip;
		cout << "Extracting interface patches\n";
		surface->extractInterfacePatchCenters(minCenterDist_interface, interface, interface_patch_centers);
		cout << "Calculating interface descriptors\n";
		surface->calculateSurfaceDescriptors(patchRadius, maxOrder, interface_patch_centers, interface, int_d);
		for (auto const & p : int_d) {
			if (p.isInterface)
				out_descriptors.push_back(p);
		}
		auto t_ms = chrono::duration_cast<chrono::milliseconds>(chrono::high_resolution_clock::now() - t_start).count();
		cout << "Patch extraction and descriptor \ncalculation time:\t"
				<< t_ms / 1000.0 << " seconds.\n"<<endl;
	}

//	void calculateInterfaceDescriptors(string const & outname, float minCenterDist, float patchRadius, int maxOrder, array3D const & interface, double threshold, vector<CompactPatchDescriptor> & out_descriptors) {
//		auto t_start = chrono::high_resolution_clock::now();
//		cout << "Extracting interface patches\n";
//		surface->extractInterfacePatchCenters(minCenterDist, interface, interface_patch_centers);
//		cout << "Calculating interface descriptors\n";
//		surface->calculateSurfaceDescriptors(patchRadius, maxOrder, interface_patch_centers, interface, threshold, out_descriptors);
//		auto t_ms = chrono::duration_cast<chrono::milliseconds>(chrono::high_resolution_clock::now() - t_start).count();
////		out_descriptors.erase(std::remove_if(out_descriptors.begin(), out_descriptors.end(),
////							[](CompactPatchDescriptor const & cpd)
////							{ return (not cpd.isInterface); }),
////							out_descriptors.end());
//		interface_descriptors_count = out_descriptors.size();
//
//		cout << "Patch extraction and descriptor \ncalculation time:\t" << t_ms / 1000.0 << " seconds.\n"<<endl;
//	}

	/**
	 * Returns list of coordinates on a sphere using the Golden-Section
	 * Spiral algorithm.
	 * @param n number of points on the sphere
	 * @param points output vector of generated points
	 */
	inline void generateSpherePoints(size_t n) {
		sphere_points.resize(n);
		double y, r, phi;
		double inc = M_PI * (3 - sqrt(5.0)); // golden-angle increment
		double offset = 2.0 / n;             // vertical spacing in [-1, 1]
		for (size_t i = 0; i < n; ++i) {
			y = i * offset - 1 + (offset / 2.0);
			r = sqrt(1 - y * y);
			phi = i * inc;
			sphere_points[i] = point3D(cos(phi)*r, y, sin(phi)*r);
		}
	};

	/**
	 * Determines if the given point is buried inside the volume of the
	 * molecule.
	 * @param point The input point
	 * @return true if the input point is buried, false otherwise, i.e. if it
	 * is solvent accessible
	 */
	inline bool is_buried(point3D const & point) {
		vector<pair<size_t, float> > ret_matches;
		// Search radius: largest atom radius inflated by the probe radius;
		// the kd-tree query takes the squared radius.
		float searchRad = (this->surface->probeRadius + Molecule::max_atm_radius);
		float s_searchRad = searchRad * searchRad;
		size_t k = atoms_tree.radiusSearch(point, s_searchRad, ret_matches);
		for (size_t ii = 0; ii < k; ++ii) {
			// NOTE(review): assumes ret_matches indices refer to
			// pqrModel->atomsInModel ordering — verify the tree is built on it.
			atom const * nb = &this->pqrModel->atomsInModel[ret_matches[ii].first];
			float atm_rad = nb->radius + this->surface->probeRadius;
			// Points exactly on an inflated atom sphere count as accessible.
			if (floatCompare(nb->distance(point), atm_rad))
				continue;
			if (nb->distance(point) < atm_rad)
				return true;
		}
		return false;
	};

	/**
	 * Shrake-Rupley-style accessible surface area: samples n_sphere_points
	 * directions around each atom and counts those not buried in any
	 * neighbouring atom.
	 * @param atoms              atoms to evaluate
	 * @param per_atom_sa_points receives the accessible sample points per atom
	 * @param per_atom_asa       receives the per-atom ASA
	 * @param n_sphere_points    samples per atom (default 96)
	 * @return total accessible surface area, or -1 when atoms is empty
	 */
	inline double calculate_AccessibleSurfaceArea(vector<atom> const & atoms,
			vector<vector<point3D>> & per_atom_sa_points,
			vector<double> & per_atom_asa, size_t n_sphere_points = 96) {
		size_t num_atoms = atoms.size();
		if (num_atoms == 0)
			return -1;
		per_atom_sa_points.resize(num_atoms);
		per_atom_asa.resize(num_atoms);
		// Guard the lazy, shared sphere_points initialization against
		// concurrent callers.
#pragma omp critical
		{
			if (sphere_points.empty())
				generateSpherePoints(n_sphere_points);
		}
		double total_ASA = 0;
		// Area weight of one sample direction on the unit sphere.
		double c = 4.0 * M_PI / n_sphere_points;
		for (size_t i = 0; i < num_atoms; ++i) {
			size_t n_accessible_pts = 0;
			double radius = atoms[i].radius + this->surface->probeRadius;
			point3D atomCenter(atoms[i].x, atoms[i].y, atoms[i].z);
			for (size_t j = 0; j < n_sphere_points; ++j) {
				point3D currentPoint = radius * sphere_points[j] + atomCenter;
				if (!is_buried(currentPoint)) {
					++n_accessible_pts;
					per_atom_sa_points[i].push_back(currentPoint);
				}
			}
			per_atom_asa[i] = c * n_accessible_pts * radius * radius;
			total_ASA += per_atom_asa[i];
		}
		return total_ASA;
	};
};

#endif /* DESCRIPTOREVALUATION_BACKUP_SRC_MOLECULE_MOLECULE_H_ */
SpatialClassNLLCriterion.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialClassNLLCriterion.c"
#else

/* Shape validation shared by forward and backward: target must be 3-D
   (batch x H x W), input must be 4-D (batch x classes x H x W), the weight
   vector (if any) must have one entry per class, and the batch/spatial
   extents of input and target must agree. */
#define INITIAL_CHECK \
  THArgCheck(THIndexTensor_(nDimension)(target) == 3, 3, \
              "only batches of spatial targets supported (3D tensors)" \
              " but got targets of dimension: %d", \
              THIndexTensor_(nDimension)(target)); \
  THArgCheck(THTensor_(nDimension)(input) == 4, 2, \
              "only batches of spatial inputs supported (4D tensors), " \
              "but got input of dimension: %d", THTensor_(nDimension)(input)); \
  if (weights && THTensor_(nElement)(weights) != THTensor_(size)(input, 1)) { \
    THError("weight tensor should be defined either for all or no classes"); \
  } \
 \
  { \
    int64_t input0 = THTensor_(size)(input, 0); \
    int64_t input1 = THTensor_(size)(input, 1); \
    int64_t input2 = THTensor_(size)(input, 2); \
    int64_t input3 = THTensor_(size)(input, 3); \
    int64_t target0 = THIndexTensor_(size)(target, 0); \
    int64_t target1 = THIndexTensor_(size)(target, 1); \
    int64_t target2 = THIndexTensor_(size)(target, 2); \
    THAssertMsg(input0 == target0 && input2 == target1 && input3 == target2, \
              "size mismatch (got input: %ldx%ldx%ldx%ld, target: %ldx%ldx%ld)", \
              input0, input1, input2, input3, target0, target1, target2); \
  }

/* In non-reduce mode gradOutput is per-element and must match target's
   (batch x H x W) shape exactly. */
#define GRADOUTPUT_SHAPE_CHECK \
  THArgCheck(THTensor_(nDimension)(gradOutput) == 3, 3, \
              "gradOutput must have same dimension as target (3)" \
              " but got dimension: %d", \
              THTensor_(nDimension)(gradOutput)); \
  { \
    int64_t gradOutput0 = THTensor_(size)(gradOutput, 0); \
    int64_t gradOutput1 = THTensor_(size)(gradOutput, 1); \
    int64_t gradOutput2 = THTensor_(size)(gradOutput, 2); \
    int64_t target0 = THIndexTensor_(size)(target, 0); \
    int64_t target1 = THIndexTensor_(size)(target, 1); \
    int64_t target2 = THIndexTensor_(size)(target, 2); \
    THAssertMsg( \
        gradOutput0 == target0 && gradOutput1 == target1 && gradOutput2 == target2, \
        "size mismatch (got gradOutput: %ldx%ldx%ld, target: %ldx%ldx%ld)", \
        gradOutput0, gradOutput1, gradOutput2, target0, target1, target2); \
  }

/* Forward pass of spatial class-NLL loss.
   reduce=false: output is resized to (batch x H x W) and holds the
   per-pixel loss -weight[t] * input[b][t][h][w] (0 where t == ignore_index).
   reduce=true: output and total_weight are scalars accumulated over all
   pixels; output is divided by total_weight when sizeAverage is set. */
void THNN_(SpatialClassNLLCriterion_updateOutput)(
          THNNState *state,
          THTensor *input,
          THIndexTensor *target,
          THTensor *output,
          bool sizeAverage,
          THTensor *weights,
          THTensor *total_weight,
          int64_t ignore_index,
          bool reduce)
{
  INITIAL_CHECK;
  THTensor_(resize1d)(output, 1);
  THTensor_(resize1d)(total_weight, 1);
  /* Shift ignore_index into 0-based indexing to match the shifted targets. */
  ignore_index -= TH_INDEX_BASE;

  if (!reduce) {
    int64_t batch_size = THTensor_(size)(input, 0);
    int64_t H = THTensor_(size)(input, 2);
    int64_t W = THTensor_(size)(input, 3);
    THTensor_(resize3d)(output, batch_size, H, W);

    int64_t b, h, w;
#pragma omp parallel for private(b, h, w)
    for (b = 0; b < batch_size; b++) {
      for (h = 0; h < H; h++) {
        for (w = 0; w < W; w++) {
          int64_t cur_target = (int64_t)THIndexTensor_(get3d)(target, b, h, w) - TH_INDEX_BASE;

          if (cur_target == ignore_index) {
            THTensor_fastSet3d(output, b, h, w, 0.0f);
            continue;
          }
          real value = THTensor_fastGet4d(input, b, cur_target, h, w);
          real weight = weights ? THTensor_fastGet1d(weights, cur_target) : 1.0f;
          THTensor_fastSet3d(output, b, h, w, -value * weight);
        }
      }
    }
    return;
  }

  /* Reduction path works on raw contiguous buffers for speed. */
  input = THTensor_(newContiguous)(input);
  target = THIndexTensor_(newContiguous)(target);
  weights = weights ? THTensor_(newContiguous)(weights) : NULL;

  real *input_data = THTensor_(data)(input);
  THIndex_t *target_data = THIndexTensor_(data)(target);
  real *weights_data = weights ? THTensor_(data)(weights) : NULL;
  real *output_data = THTensor_(data)(output);
  real *total_weight_data = THTensor_(data)(total_weight);

  int64_t batch_size = THTensor_(size)(input, 0);
  int64_t n_classes = THTensor_(size)(input, 1);
  int64_t map_size = THTensor_(size)(input, 2) * THTensor_(size)(input, 3);
  int64_t sample_size = map_size * n_classes;

  real total_weight_acc = 0;
  real output_acc = 0;
  for (int b = 0; b < batch_size; b++) {
    for (int elem = 0; elem < map_size; elem++) {
      int cur_target = target_data[b * map_size + elem] - TH_INDEX_BASE;
      if (cur_target == ignore_index) continue;
      THAssert(cur_target >= 0 && cur_target < n_classes);

      real cur_weight = weights ? weights_data[cur_target] : 1.0f;
      total_weight_acc += cur_weight;
      output_acc -= input_data[b * sample_size + cur_target * map_size + elem] * cur_weight;
    }
  }
  *total_weight_data = total_weight_acc;
  *output_data = output_acc;

  if (sizeAverage && *total_weight_data)
    *output_data /= *total_weight_data;

  THTensor_(free)(input);
  THIndexTensor_(free)(target);
  if (weights)
    THTensor_(free)(weights);
}

/* Backward pass: writes -weight[t] (scaled by gradOutput, and by
   1/total_weight when sizeAverage) into gradInput at each target class
   position; pixels with t == ignore_index contribute nothing. */
void THNN_(SpatialClassNLLCriterion_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THIndexTensor *target,
          THTensor *gradOutput,
          THTensor *gradInput,
          bool sizeAverage,
          THTensor *weights,
          THTensor *total_weight,
          int64_t ignore_index,
          bool reduce)
{
  INITIAL_CHECK;
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);
  THArgCheck(THTensor_(isContiguous)(gradInput), 4,
              "gradInput must be contiguous");
  THNN_CHECK_SHAPE(input, gradInput);
  ignore_index -= TH_INDEX_BASE;

  if (!reduce) {
    GRADOUTPUT_SHAPE_CHECK;

    int64_t batch_size = THTensor_(size)(input, 0);
    int64_t H = THTensor_(size)(input, 2);
    int64_t W = THTensor_(size)(input, 3);

    int64_t b, h, w;
#pragma omp parallel for private(b, h, w)
    for (b = 0; b < batch_size; b++) {
      for (h = 0; h < H; h++) {
        for (w = 0; w < W; w++) {
          int64_t cur_target = (int64_t)THIndexTensor_(get3d)(target, b, h, w) - TH_INDEX_BASE;
          if (cur_target == ignore_index) {
            continue;
          }
          real value = -(weights ? THTensor_fastGet1d(weights, cur_target) : 1.0f);
          real gradOutput_value = THTensor_fastGet3d(gradOutput, b, h, w);
          THTensor_fastSet4d(gradInput, b, cur_target, h, w, value * gradOutput_value);
        }
      }
    }
    return;
  }

  THNN_CHECK_DIM_SIZE(gradOutput, 1, 0, 1);
  real *total_weight_data = THTensor_(data)(total_weight);
  /* Forward saw only ignored pixels (or empty input): gradient stays zero. */
  if (*total_weight_data <= 0)
    return;

  target = THIndexTensor_(newContiguous)(target);
  weights = weights ? THTensor_(newContiguous)(weights) : NULL;

  THIndex_t *target_data = THIndexTensor_(data)(target);
  real *weights_data = weights ? THTensor_(data)(weights) : NULL;
  real *gradInput_data = THTensor_(data)(gradInput);

  int64_t batch_size = THTensor_(size)(input, 0);
  int64_t n_classes = THTensor_(size)(input, 1);
  int64_t map_size = THTensor_(size)(input, 2) * THTensor_(size)(input, 3);
  int64_t sample_size = map_size * n_classes;

  real normalize = (sizeAverage) ? *total_weight_data : 1.0f;

  int b;
#pragma omp parallel for
  for (b = 0; b < batch_size; b++) {
    int elem;
    for (elem = 0; elem < map_size; elem++) {
      int cur_target = target_data[b * map_size + elem] - TH_INDEX_BASE;
      if (cur_target == ignore_index) continue;
      THAssert(cur_target >= 0 && cur_target < n_classes);

      int index = b * sample_size + cur_target * map_size + elem;
      gradInput_data[index] =
        -(weights ? weights_data[cur_target] : 1.0f) / normalize *
        THTensor_fastGet1d(gradOutput, 0);
    }
  }

  THIndexTensor_(free)(target);
  if (weights)
    THTensor_(free)(weights);
}

#undef INITIAL_CHECK

#endif
hermm_c_dia_u_lo_row_trans.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #ifdef _OPENMP #include <omp.h> #endif #include <memory.h> #include <stdlib.h> alphasparse_status_t ONAME(const ALPHA_Complex alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Complex *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Complex beta, ALPHA_Complex *y, const ALPHA_INT ldy) { ALPHA_INT num_threads = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT r = 0; r < mat->rows; r++) for(ALPHA_INT c = 0; c < columns; c++){ alpha_mul(y[index2(r,c,ldy)],y[index2(r,c,ldy)],beta); alpha_madde(y[index2(r,c,ldy)],x[index2(r,c,ldx)],alpha); } #ifdef _OPENMP #pragma omp parallel num_threads(num_threads) #endif { ALPHA_INT tid = alpha_get_thread_id(); ALPHA_INT bcl = cross_block_low(tid,num_threads,columns); ALPHA_INT bch = cross_block_high(tid,num_threads,columns); for(ALPHA_INT di = 0; di < mat->ndiag;++di){ ALPHA_INT d = mat->distance[di]; if(d < 0){ ALPHA_INT ars = alpha_max(0,-d); ALPHA_INT acs = alpha_max(0,d); ALPHA_INT an = alpha_min(mat->rows - ars,mat->cols - acs); for(ALPHA_INT i = 0; i < an; ++i){ ALPHA_INT ar = ars + i; ALPHA_INT ac = acs + i; ALPHA_Complex val,val_c; alpha_mul(val,mat->values[index2(di,ar,mat->lval)],alpha); alpha_mul_2c(val_c,mat->values[index2(di,ar,mat->lval)],alpha); for(ALPHA_INT bc = bcl;bc < bch;++bc){ alpha_madde(y[index2(ar,bc,ldy)],val_c,x[index2(ac,bc,ldx)]); alpha_madde(y[index2(ac,bc,ldy)],val,x[index2(ar,bc,ldx)]); } } } } } return ALPHA_SPARSE_STATUS_SUCCESS; }
omp_hello.c
#include <stdio.h> #include <omp.h> int main() { omp_set_num_threads(2); #pragma omp parallel { printf("Hello OpenMP\n"); } return 0; }
paraGraph.h
#ifndef __PARAGRAPH_H__
#define __PARAGRAPH_H__

#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#include "vertex_set.h"
#include "graph.h"

#include "mic.h"
#include "ts_hashtable.h"
#include "util.h"
#include <time.h>
#include <immintrin.h>

#define CHUNK_SIZE 32
#define MAX_THREAD_NUM 256

/*
 * edgeMap --
 *
 * Students will implement this function.
 *
 * The input argument f is a class with the following methods defined:
 *   bool update(Vertex src, Vertex dst)
 *   bool cond(Vertex v)
 *
 * See apps/bfs.cpp for an example of such a class definition.
 *
 * When the argument removeDuplicates is false, the implementation of
 * edgeMap need not remove duplicate vertices from the VertexSet it
 * creates when iterating over edges.  This is a performance
 * optimization when the application knows (and can tell ParaGraph)
 * that f.update() guarantees that duplicate vertices cannot appear in
 * the output vertex set.
 *
 * Further notes: the implementation of edgeMap is templated on the
 * type of this object, which allows for higher performance code
 * generation as these methods will be inlined.
 */
template <class F>
static VertexSet *edgeMap(Graph g, VertexSet *u, F &f,
    bool removeDuplicates=true)
{
  // outputSubset = {}
  // foreach u in U: (in parallel)
  //    for each outgoing edge (u,v) from u: (in parallel)
  //      if (C(v) && F(u,v))
  //        outputSubset.append(v)
  // remove_duplicates(outputSubset)
  // return outputSubset

  // Per-thread outgoing-edge counts / emitted-vertex counts, turned into
  // offsets via an exclusive scan.  Static: reused across calls, so edgeMap
  // is not reentrant from concurrent callers.
  static int edge_counts[MAX_THREAD_NUM];
  static int edge_sizes[MAX_THREAD_NUM];
  int size = u -> size;
  int total_num = num_nodes(g);
  VertexSet* ret;
  bool need_free = false;
  // Heuristic: frontier smaller than 1% of vertices -> sparse top-down pass,
  // otherwise dense bottom-up pass.
  if(size < total_num / 100) {
    // ensure u is SPARSE
    if(u -> type != SPARSE) {
      u = ConvertDenseToSparse(u);
      need_free = true;
    }
    ts_hashtable * hash_table;
    int max_threads = omp_get_max_threads();
    Vertex * vertices = u -> vertices;
    // Pass 1: each thread counts the outgoing edges of its slice of the
    // frontier, giving an upper bound on output size per thread.
    #pragma omp parallel
    {
      int numThreads = omp_get_num_threads();
      int blockSize = (size + numThreads - 1)/ numThreads;
      int tid = omp_get_thread_num();
      int start = blockSize * tid;
      int end = start + blockSize;
      end = end > size ? size : end;
      int localCount = 0;
      for (int i = start; i < end; i++) {
        int diff = outgoing_size(g, vertices[i]);
        localCount += diff;
      }
      #pragma vector nontemporal(edge_counts)
      edge_counts[tid] = localCount;
    }
    int numNextPow2 = nextPow2(max_threads + 1);
    exclusive_scan(edge_counts, numNextPow2);
    int capacity = edge_counts[max_threads];
    int * edges = (int *)malloc(sizeof(int) * (capacity + 1));
    // top down approach
    if(removeDuplicates)
      hash_table = new_hashtable(capacity | 1); //odd number capacity
    // Pass 2: re-walk the same slices and emit accepted destinations into
    // each thread's reserved [localOffset, ...) region of `edges`.
    #pragma omp parallel
    {
      int numThreads = omp_get_num_threads();
      int blockSize = (size + numThreads - 1)/ numThreads;
      int tid = omp_get_thread_num();
      int start = blockSize * tid;
      int end = start + blockSize;
      end = end > size ? size : end;
      int localSize = 0;
      int localOffset;
      #pragma vector nontemporal(edge_counts)
      localOffset = edge_counts[tid];
      for (int i = start; i < end; i++) {
        const Vertex v_i = vertices[i];
        const Vertex* start = outgoing_begin(g, v_i);
        const Vertex* end = outgoing_end(g, v_i);
        for (const Vertex* k = start; k != end; k++) {
          // Short-circuit order matters: cond, then update (which may have
          // side effects), then the thread-safe dedup insert.
          if (f.cond(*k) && f.update(v_i, *k) &&
              (!removeDuplicates || !hashtable_set(hash_table, *k))) {
            edges[localOffset + localSize] = *k;
            localSize++;
          }
        }
      }
      #pragma vector nontemporal(edge_sizes)
      edge_sizes[tid] = localSize;
    }
    // Pass 3: compact the per-thread runs into a contiguous sparse set.
    exclusive_scan(edge_sizes, numNextPow2);
    capacity = edge_sizes[max_threads];
    ret = newVertexSet(SPARSE, capacity, total_num);
    #pragma omp parallel for schedule(static)
    for(int i = 0; i < max_threads; i++) {
      int edge_sizestart = edge_sizes[i];
      int edgeStart = edge_counts[i];
      int length = edge_sizes[i + 1] - edge_sizes[i];
      if(length > 0)
        memcpy(&ret -> vertices[edge_sizestart], &edges[edgeStart], sizeof(int) * length);
    }
    setSize(ret, capacity);
    free(edges);
    if(removeDuplicates)
      hashtable_free(hash_table);
  } else {
    // ensure u is DENSE
    if(u -> type != DENSE) {
      u = ConvertSparseToDense(u);
      need_free = true;
    }
    // buttom up approach
    ret = newVertexSet(DENSE, size, total_num);
    // Vertex is typedef'ed as int
    int total_size = 0;
    // Each iteration builds the CHUNK_SIZE-wide membership bitmask for one
    // chunk of destination vertices by scanning their incoming edges.
    #pragma omp parallel for schedule(dynamic, 32) reduction(+:total_size)
    for(Vertex chunk = 0; chunk < total_num; chunk+=CHUNK_SIZE) {
      int mapValue = 0;
      for(int i = chunk; i < (chunk + CHUNK_SIZE) && i < total_num; i++) {
        bool hasAdded = false;
        const Vertex* k = incoming_begin(g, i);
        const Vertex* end = incoming_end(g, i);
        // Keep applying update() for every in-neighbor in the frontier, but
        // record vertex i in the output at most once.
        while(f.cond(i) && k != end) {
          if ((u -> size == u -> numNodes || DenseHasVertex(u, *k)) && f.update(*k, i) && !hasAdded) {
            hasAdded = true;
            mapValue |= 1 << (i - chunk);
            total_size += 1;
          }
          k++;
        }
      }
      DenseSetMapValue(ret, chunk / CHUNK_SIZE, mapValue);
    }
    setSize(ret, total_size);
  }
  if(need_free)
    freeVertexSet(u);
  return ret;
}

/*
 * vertexMap --
 *
 * Students will implement this function.
* * The input argument f is a class with the following methods defined: * bool operator()(Vertex v) * * See apps/kBFS.cpp for an example implementation. * * Note that you'll call the function on a vertex as follows: * Vertex v; * bool result = f(v) * * If returnSet is false, then the implementation of vertexMap should * return NULL (it need not build and create a vertex set) */ template <class F> static VertexSet *vertexMap(VertexSet *u, F &f, bool returnSet=true) { // 1. apply F to all vertices in U // 2. return a new vertex subset containing all vertices u in U // for which F(u) == true int size = u -> size; int numNodes = u -> numNodes; if(u -> type == SPARSE) { Vertex * vertices = u -> vertices; if (returnSet) { VertexSet* ret = newVertexSet(SPARSE, size, numNodes); #pragma omp parallel for for (int i = 0; i < size; i++) { if (f(vertices[i])) { addVertex(ret, vertices[i]); } } return ret; } else { #pragma omp parallel for for (int i = 0; i < size; i++) { f(vertices[i]); } return NULL; } } else { if (returnSet) { int total_size = 0; VertexSet* ret = newVertexSet(DENSE, size, numNodes); #pragma omp parallel for schedule(static) reduction(+:total_size) for(int chunk = 0; chunk < numNodes; chunk+=CHUNK_SIZE) { int mapValue = 0; for(int i = chunk; i < (chunk + CHUNK_SIZE) && i < numNodes; i++) { if (DenseHasVertex(u, i) && f(i)) { mapValue |= 1 << (i - chunk); total_size += 1; } } DenseSetMapValue(ret, chunk / CHUNK_SIZE, mapValue); } setSize(ret, total_size); return ret; } else { #pragma omp parallel for schedule(static) for(int chunk = 0; chunk < numNodes; chunk+=CHUNK_SIZE) { int base = chunk / CHUNK_SIZE; int map = DenseGetMapValue(u, base); for(int i = chunk; i < (chunk + CHUNK_SIZE) && i < numNodes; i++) { if((map & (1 << (i-chunk)))) f(i); } } return NULL; } } } #endif /* __PARAGRAPH_H__ */
cache.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC AAA CCCC H H EEEEE % % C A A C H H E % % C AAAAA C HHHHH EEE % % C A A C H H E % % CCCC A A CCCC H H EEEEE % % % % % % MagickCore Pixel Cache Methods % % % % Software Design % % Cristy % % July 1999 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distribute-cache-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/quantum.h" #include "MagickCore/random_.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Define declarations. */ #define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent) #define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \ GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse) /* Typedef declarations. */ typedef struct _MagickModulo { ssize_t quotient, remainder; } MagickModulo; /* Forward declarations. 
*/ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static Cache GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *) magick_hot_spot; static const Quantum *GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t, const ssize_t,const size_t,const size_t,ExceptionInfo *), *GetVirtualPixelsCache(const Image *); static const void *GetVirtualMetacontentFromCache(const Image *); static MagickBooleanType GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *, ExceptionInfo *), GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod, const ssize_t,const ssize_t,Quantum *,ExceptionInfo *), OpenPixelCache(Image *,const MapMode,ExceptionInfo *), OpenPixelCacheOnDisk(CacheInfo *,const MapMode), ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), ReadPixelCacheMetacontent(CacheInfo *magick_restrict, NexusInfo *magick_restrict,ExceptionInfo *), SyncAuthenticPixelsCache(Image *,ExceptionInfo *), WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict, ExceptionInfo *); static Quantum *GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *SetPixelCacheNexusPixels(const CacheInfo *,const MapMode, const RectangleInfo *,NexusInfo *,ExceptionInfo *) magick_hot_spot; #if defined(MAGICKCORE_OPENCL_SUPPORT) static void CopyOpenCLBuffer(CacheInfo *magick_restrict); #endif #if defined(__cplusplus) || defined(c_plusplus) } #endif /* Global declarations. 
*/
static SemaphoreInfo
  *cache_semaphore = (SemaphoreInfo *) NULL;

/* -1 means "not yet probed"; set elsewhere when anonymous-memory policy is
   resolved. */
static ssize_t
  cache_anonymous_memory = (-1);

static time_t
  cache_epoch = 0;

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A c q u i r e P i x e l C a c h e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquirePixelCache() acquires a pixel cache.
%
%  The format of the AcquirePixelCache() method is:
%
%      Cache AcquirePixelCache(const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o number_threads: the number of nexus threads.
%
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict cache_info;

  char
    *value;

  cache_info=(CacheInfo *) AcquireQuantumMemory(1,sizeof(*cache_info));
  if (cache_info == (CacheInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(cache_info,0,sizeof(*cache_info));
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->file=(-1);
  cache_info->id=GetMagickThreadId();
  /* Thread count is raised to at least the OpenMP maximum, capped by the
     thread resource limit, and never allowed to be zero. */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /* The synchronize flag may come from the environment, then be overridden
     by the security policy. */
  value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  value=GetPolicyValue("cache:synchronize");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  cache_info->semaphore=AcquireSemaphoreInfo();
  cache_info->reference_count=1;
  cache_info->file_semaphore=AcquireSemaphoreInfo();
  cache_info->debug=IsEventLogging();
  cache_info->signature=MagickCoreSignature;
  return((Cache ) cache_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e P i x e l C a c h e N e x u s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
%  The format of the AcquirePixelCacheNexus method is:
%
%      NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o number_threads: the number of nexus threads.
%
*/
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **magick_restrict nexus_info;

  register ssize_t
    i;

  /* One pointer per thread; the NexusInfo structs themselves live in a
     single contiguous allocation anchored at nexus_info[0]. */
  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  nexus_info[0]=(NexusInfo *) AcquireQuantumMemory(number_threads,
    sizeof(**nexus_info));
  if (nexus_info[0] == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(nexus_info[0],0,number_threads*sizeof(**nexus_info));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    nexus_info[i]=(&nexus_info[0][i]);
    nexus_info[i]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A c q u i r e P i x e l C a c h e P i x e l s                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquirePixelCachePixels() returns the pixels associated with the specified
%  image.
% % The format of the AcquirePixelCachePixels() method is: % % const void *AcquirePixelCachePixels(const Image *image, % MagickSizeType *length,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate const void *AcquirePixelCachePixels(const Image *image, MagickSizeType *length,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=0; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((const void *) NULL); *length=cache_info->length; return((const void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t G e n e s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentGenesis() instantiates the cache component. % % The format of the CacheComponentGenesis method is: % % MagickBooleanType CacheComponentGenesis(void) % */ MagickPrivate MagickBooleanType CacheComponentGenesis(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) cache_semaphore=AcquireSemaphoreInfo(); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t T e r m i n u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentTerminus() destroys the cache component. 
% % The format of the CacheComponentTerminus() method is: % % CacheComponentTerminus(void) % */ MagickPrivate void CacheComponentTerminus(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) ActivateSemaphoreInfo(&cache_semaphore); /* no op-- nothing to destroy */ RelinquishSemaphoreInfo(&cache_semaphore); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCache() clones a pixel cache. % % The format of the ClonePixelCache() method is: % % Cache ClonePixelCache(const Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ MagickPrivate Cache ClonePixelCache(const Cache cache) { CacheInfo *magick_restrict clone_info; const CacheInfo *magick_restrict cache_info; assert(cache != NULL); cache_info=(const CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads); clone_info->virtual_pixel_method=cache_info->virtual_pixel_method; return((Cache ) clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheMethods() clones the pixel cache methods from one cache to % another. % % The format of the ClonePixelCacheMethods() method is: % % void ClonePixelCacheMethods(Cache clone,const Cache cache) % % A description of each parameter follows: % % o clone: Specifies a pointer to a Cache structure. % % o cache: the pixel cache. 
%
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict source_info;

  assert(clone != (Cache) NULL);
  source_info=(CacheInfo *) clone;
  assert(source_info->signature == MagickCoreSignature);
  if (source_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source_info->filename);
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Copy the whole method-handler table; the destination is `clone`. */
  source_info->methods=cache_info->methods;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l o n e P i x e l C a c h e R e p o s i t o r y                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClonePixelCacheRepository() clones the source pixel cache to the destination
%  cache.
%
%  The format of the ClonePixelCacheRepository() method is:
%
%      MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
%        CacheInfo *source_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o source_info: the source pixel cache.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  ClonePixelCacheOnDisk() bulk-copies one disk-backed cache file to another.
  Returns MagickFalse if either file cannot be opened or if fewer bytes than
  cache_info->length were transferred.  Fatal on buffer allocation failure.
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  /* Copy in chunks no larger than the source file (when its size is known). */
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  extent=0;
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    number_bytes=write(clone_info->file,buffer,(size_t) count);
    if (number_bytes != count)
      break;  /* short write: surface as a length mismatch below */
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}

static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
/*
  Cap the OpenMP team at MaxCacheThreads, and force a single thread when
  either cache is disk-backed or the image is small.
*/
#define MaxCacheThreads  2
#define cache_threads(source,destination) \
  num_threads(((source)->type == DiskCache) || \
    ((destination)->type == DiskCache) || (((source)->rows) < \
    (16*GetMagickResourceLimit(ThreadResource))) ? 1 : \
    GetMagickResourceLimit(ThreadResource) < MaxCacheThreads ? \
    GetMagickResourceLimit(ThreadResource) : MaxCacheThreads)

  MagickBooleanType
    optimize,
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (cache_info->type == PingCache)
    return(MagickTrue);  /* ping caches carry no pixel data to clone */
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  if ((cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->number_channels == clone_info->number_channels) &&
      (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
      (cache_info->metacontent_extent == clone_info->metacontent_extent))
    {
      /*
        Identical pixel cache morphology: a straight byte copy suffices.
      */
      if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) || (clone_info->type == MapCache)))
        {
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->columns*cache_info->number_channels*cache_info->rows*
            sizeof(*cache_info->pixels));
          if ((cache_info->metacontent_extent != 0) &&
              (clone_info->metacontent_extent != 0))
            (void) memcpy(clone_info->metacontent,cache_info->metacontent,
              cache_info->columns*cache_info->rows*
              clone_info->metacontent_extent*sizeof(unsigned char));
          return(MagickTrue);
        }
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info));
      /* other type combinations fall through to the row-by-row path below */
    }
  /*
    Mismatched pixel cache morphology: copy row by row through a pair of
    nexus scratch regions, remapping channels when the maps differ.
  */
  cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
  clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
  if ((cache_nexus == (NexusInfo **) NULL) ||
      (clone_nexus == (NexusInfo **) NULL))
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  /* optimize: channel maps agree, so rows can be block-copied */
  optimize=(cache_info->number_channels == clone_info->number_channels) &&
    (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
    MagickTrue : MagickFalse;
  length=(size_t) MagickMin(cache_info->columns*cache_info->number_channels,
    clone_info->columns*clone_info->number_channels);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    cache_threads(cache_info,clone_info)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *pixels;

    RectangleInfo
      region;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* cannot break out of an OpenMP loop; skip remaining rows */
    if (y >= (ssize_t) clone_info->rows)
      continue;  /* destination is shorter than the source */
    /*
      Read one full source row into this thread's nexus.
    */
    region.width=cache_info->columns;
    region.height=1;
    region.x=0;
    region.y=y;
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,
      cache_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    region.width=clone_info->columns;
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,
      clone_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    /* Zero the destination row so unmapped channels read as 0. */
    (void) ResetMagickMemory(clone_nexus[id]->pixels,0,(size_t)
      clone_nexus[id]->length);
    if (optimize != MagickFalse)
      (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
        sizeof(Quantum));
    else
      {
        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        /*
          Mismatched pixel channel map: copy channel-by-channel, looking up
          each destination channel's offset in the source map.
        */
        p=cache_nexus[id]->pixels;
        q=clone_nexus[id]->pixels;
        for (x=0; x < (ssize_t) cache_info->columns; x++)
        {
          register ssize_t
            i;

          if (x == (ssize_t) clone_info->columns)
            break;  /* destination row is narrower than the source */
          for (i=0; i < (ssize_t) clone_info->number_channels; i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=clone_info->channel_map[i].channel;
            traits=cache_info->channel_map[channel].traits;
            if (traits != UndefinedPixelTrait)
              *q=*(p+cache_info->channel_map[channel].offset);
            q++;  /* advance even when the source lacks this channel (stays 0) */
          }
          p+=cache_info->number_channels;
        }
      }
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->metacontent_extent != 0) &&
      (clone_info->metacontent_extent != 0))
    {
      /*
        Clone metacontent, truncating to the smaller per-pixel extent.
      */
      length=(size_t) MagickMin(cache_info->metacontent_extent,
        clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        cache_threads(cache_info,clone_info)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        Quantum
          *pixels;

        RectangleInfo
          region;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        region.width=cache_info->columns;
        region.height=1;
        region.x=0;
        region.y=y;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,
          cache_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        region.width=clone_info->columns;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,
          clone_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        if ((clone_nexus[id]->metacontent != (void *) NULL) &&
            (cache_nexus[id]->metacontent != (void *) NULL))
          (void) memcpy(clone_nexus[id]->metacontent,
            cache_nexus[id]->metacontent,length*sizeof(unsigned char));
        status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
      }
    }
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads);
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y I m a g e P i x e l C a c h e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
%  The format of the DestroyImagePixelCache() method is:
%
%      void DestroyImagePixelCache(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache == (void *) NULL)
    return;  /* nothing attached: nothing to destroy */
  image->cache=DestroyPixelCache(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y I m a g e P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImagePixels() deallocates memory associated with the pixel cache.
%
%  The format of the DestroyImagePixels() method is:
%
%      void DestroyImagePixels(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* A registered handler (if any) takes full responsibility for teardown. */
  if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL)
    {
      cache_info->methods.destroy_pixel_handler(image);
      return;
    }
  image->cache=DestroyPixelCache(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y P i x e l C a c h e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPixelCache() deallocates memory associated with the pixel cache.
%
%  The format of the DestroyPixelCache() method is:
%
%      Cache DestroyPixelCache(Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/

/*
  ClosePixelCacheOnDisk() closes the cache's file descriptor (if open) and
  releases the associated FileResource.  Returns MagickFalse if close(2)
  fails or if no descriptor was open.
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    status;

  status=(-1);
  if (cache_info->file != -1)
    {
      status=close(cache_info->file);
      cache_info->file=(-1);
      RelinquishMagickResource(FileResource,1);
    }
  return(status == -1 ? MagickFalse : MagickTrue);
}

/*
  RelinquishPixelCachePixels() releases the pixel storage (memory, map, disk
  file, or distributed server) of a cache and resets it to UndefinedCache.
*/
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          /* OpenCL owns the buffer; hand it back and stop here. */
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      /* mapped==MagickTrue means anonymous-mapped memory, not heap */
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /* NOTE: no break above -- MapCache deliberately falls through to
       DiskCache so the backing file descriptor is closed too. */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}

MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Drop one reference under the lock; only the last reference actually
    destroys the cache.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  cache_info->signature=(~MagickCoreSignature);  /* poison against reuse */
  cache_info=(CacheInfo *) RelinquishMagickMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y P i x e l C a c h e N e x u s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
%  The format of the DestroyPixelCacheNexus() method is:
%
%      NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
%        const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o nexus_info: the nexus to destroy.
%
%    o number_threads: the number of nexus threads.
% */ static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info) { if (nexus_info->mapped == MagickFalse) (void) RelinquishAlignedMemory(nexus_info->cache); else (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length); nexus_info->cache=(Quantum *) NULL; nexus_info->pixels=(Quantum *) NULL; nexus_info->metacontent=(void *) NULL; nexus_info->length=0; nexus_info->mapped=MagickFalse; } MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info, const size_t number_threads) { register ssize_t i; assert(nexus_info != (NexusInfo **) NULL); for (i=0; i < (ssize_t) number_threads; i++) { if (nexus_info[i]->cache != (Quantum *) NULL) RelinquishCacheNexusPixels(nexus_info[i]); nexus_info[i]->signature=(~MagickCoreSignature); } nexus_info[0]=(NexusInfo *) RelinquishMagickMemory(nexus_info[0]); nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info); return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticMetacontent() returns the authentic metacontent corresponding % with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is % returned if the associated pixels are not available. % % The format of the GetAuthenticMetacontent() method is: % % void *GetAuthenticMetacontent(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ MagickExport void *GetAuthenticMetacontent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_metacontent_from_handler != (GetAuthenticMetacontentFromHandler) NULL) { void *metacontent; metacontent=cache_info->methods. get_authentic_metacontent_from_handler(image); return(metacontent); } assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticMetacontentFromCache() returns the meta-content corresponding % with the last call to QueueAuthenticPixelsCache() or % GetAuthenticPixelsCache(). % % The format of the GetAuthenticMetacontentFromCache() method is: % % void *GetAuthenticMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ static void *GetAuthenticMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL % operations. % % The format of the GetAuthenticOpenCLBuffer() method is: % % cl_mem GetAuthenticOpenCLBuffer(const Image *image, % MagickCLDevice device,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o device: the device to use. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  MagickCLDevice device,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  cl_int
    status;  /* NOTE(review): unused in this function -- candidate for removal */

  assert(image != (const Image *) NULL);
  assert(device != (const MagickCLDevice) NULL);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info->type == UndefinedCache)
    SyncImagePixelCache((Image *) image,exception);
  /* Only unmapped, memory-resident caches can be wrapped in a CL buffer. */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  /* An existing CL cache bound to a different context must be re-copied. */
  if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
      (cache_info->opencl->device->context != device->context))
    cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    {
      assert(cache_info->pixels != (Quantum *) NULL);
      cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
        cache_info->length);
      if (cache_info->opencl == (MagickCLCacheInfo) NULL)
        return((cl_mem) NULL);
    }
  assert(cache_info->opencl->pixels == cache_info->pixels);
  return(cache_info->opencl->buffer);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t A u t h e n t i c P i x e l C a c h e N e x u s                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
%  disk pixel cache as defined by the geometry parameters.   A pointer to the
%  pixels is returned if the pixels are transferred, otherwise a NULL is
%  returned.
%
%  The format of the GetAuthenticPixelCacheNexus() method is:
%
%      Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to return.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict pixels;

  /*
    Transfer pixels from the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /* Map (and possibly allocate) the region in the nexus first. */
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((Quantum *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* If the nexus aliases the cache directly, no read is needed. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((Quantum *) NULL);
  if (cache_info->metacontent_extent != 0)
    if (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse)
      return((Quantum *) NULL);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t A u t h e n t i c P i x e l s F r o m C a c h e                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelsFromCache() returns the pixels associated with the last
%  call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
%  The format of the GetAuthenticPixelsFromCache() method is:
%
%      Quantum *GetAuthenticPixelsFromCache(const Image image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* Pixels staged in this thread's nexus by the last queue/get call. */
  return(cache_info->nexus_info[id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A u t h e n t i c P i x e l Q u e u e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelQueue() returns the authentic pixels associated
%  corresponding with the last call to QueueAuthenticPixels() or
%  GetAuthenticPixels().
%
%  The format of the GetAuthenticPixelQueue() method is:
%
%      Quantum *GetAuthenticPixelQueue(const Image image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Delegate to a registered handler when one is installed. */
  if (cache_info->methods.get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    return(cache_info->methods.get_authentic_pixels_from_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A u t h e n t i c P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixels() obtains a pixel region for read/write access.  If the
%  region is successfully accessed, a pointer to a Quantum array
%  representing the region is returned, otherwise NULL is returned.
%
%  The returned pointer may point to a temporary working copy of the pixels
%  or it may point to the original pixels in memory.  Performance is maximized
%  if the selected region is part of one row, or one or more full rows, since
%  then there is opportunity to access the pixels in-place (without a copy)
%  if the image is in memory, or in a memory-mapped file.  The returned pointer
%  must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image has corresponding metacontent,call
%  GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
%  meta-content corresponding to the region.  Once the Quantum array has
%  been updated, the changes must be saved back to the underlying image using
%  SyncAuthenticPixels() or they may be lost.
%
%  The format of the GetAuthenticPixels() method is:
%
%      Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  Quantum
    *pixels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* A registered handler (if any) services the request instead. */
  if (cache_info->methods.get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    {
      pixels=cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
        rows,exception);
      return(pixels);
    }
  assert(id < (int) cache_info->number_threads);
  pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t A u t h e n t i c P i x e l s C a c h e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
%  as defined by the geometry parameters.   A pointer to the pixels is returned
%  if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetAuthenticPixelsCache() method is:
%
%      Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
% */ static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return((Quantum *) NULL); assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageExtent() returns the extent of the pixels associated corresponding % with the last call to QueueAuthenticPixels() or GetAuthenticPixels(). % % The format of the GetImageExtent() method is: % % MagickSizeType GetImageExtent(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* Extent of this thread's nexus, i.e. the last region selected on it. */
  return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e P i x e l C a c h e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixelCache() ensures that there is only a single reference to the
%  pixel cache to be modified, updating the provided cache pointer to point to
%  a clone of the original pixel cache if necessary.
%
%  The format of the GetImagePixelCache method is:
%
%      Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone: any value other than MagickFalse clones the cache pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  const CacheInfo
    *magick_restrict cache_info;

  const PixelChannelMap
    *magick_restrict p,
    *magick_restrict q;

  /*
    Does the image match the pixel cache morphology?  Every geometric and
    channel-layout property must agree, and the per-thread nexus array must
    exist, for the cache to be reused without reopening it.
  */
  cache_info=(CacheInfo *) image->cache;
  p=image->channel_map;
  q=cache_info->channel_map;
  if ((image->storage_class != cache_info->storage_class) ||
      (image->colorspace != cache_info->colorspace) ||
      (image->alpha_trait != cache_info->alpha_trait) ||
      (image->read_mask != cache_info->read_mask) ||
      (image->write_mask != cache_info->write_mask) ||
      (image->columns != cache_info->columns) ||
      (image->rows != cache_info->rows) ||
      (image->number_channels != cache_info->number_channels) ||
      (memcmp(p,q,image->number_channels*sizeof(*p)) != 0) ||
      (image->metacontent_extent != cache_info->metacontent_extent) ||
      (cache_info->nexus_info == (NexusInfo **) NULL))
    return(MagickFalse);
  return(MagickTrue);
}

static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  /*
    Lazily read the CPU throttle limit once, then periodically (every 32nd
    call) yield the processor if a throttle is configured.
  */
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=time((time_t *) NULL);
    }
  /* Abort (fatal) when the configured wall-clock time limit is exceeded. */
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (time((time_t *) NULL)-cache_epoch) >= cache_timelimit))
    {
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /*
    Copy-on-write: when the cache is shared (reference_count > 1) or opened
    read-only, clone it so this image owns a private, writable copy.  The
    condition is re-checked under the cache semaphore (double-checked lock).
  */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.
          */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status != MagickFalse)
            {
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status != MagickFalse)
                {
                  /* Swap in the clone; the original is destroyed below. */
                  destroy=MagickTrue;
                  image->cache=clone_image.cache;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->type == DiskCache)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e P i x e l C a c h e T y p e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
%  DiskCache, MemoryCache, MapCache, or PingCache.
%
%  The format of the GetImagePixelCacheType() method is:
%
%      CacheType GetImagePixelCacheType(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->type);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e A u t h e n t i c P i x e l                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.
%
%  The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(const Image image,const ssize_t x,
%        const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline MagickBooleanType CopyPixel(const Image *image,
  const Quantum *source,Quantum *destination)
{
  register ssize_t
    i;

  /*
    Copy one pixel's channels into destination.  When source is NULL (the
    cache read failed) fill from the image background color instead and
    report failure so the caller can propagate it.
  */
  if (source == (const Quantum *) NULL)
    {
      destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
      destination[GreenPixelChannel]=ClampToQuantum(
        image->background_color.green);
      destination[BluePixelChannel]=ClampToQuantum(
        image->background_color.blue);
      destination[BlackPixelChannel]=ClampToQuantum(
        image->background_color.black);
      destination[AlphaPixelChannel]=ClampToQuantum(
        image->background_color.alpha);
      return(MagickFalse);
    }
  /* Map each source channel index to its destination channel slot. */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    destination[channel]=source[i];
  }
  return(MagickTrue);
}

MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  register Quantum
    *magick_restrict q;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Zero the output first so all MaxPixelChannels slots are defined. */
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (cache_info->methods.get_one_authentic_pixel_from_handler !=
       (GetOneAuthenticPixelFromHandler) NULL)
    return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,
      pixel,exception));
  q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  return(CopyPixel(image,q,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t O n e A u t h e n t i c P i x e l F r o m C a c h e                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
%  location.
The image background color is returned if an error occurs.
%
%  The format of the GetOneAuthenticPixelFromCache() method is:
%
%      MagickBooleanType GetOneAuthenticPixelFromCache(const Image image,
%        const ssize_t x,const ssize_t y,Quantum *pixel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  register Quantum
    *magick_restrict q;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* Zero the output first so all MaxPixelChannels slots are defined. */
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  /* Fetch a 1x1 authentic region through this thread's nexus. */
  q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id],
    exception);
  return(CopyPixel(image,q,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e V i r t u a l P i x e l                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixel() returns a single virtual pixel at the specified
%  (x,y) location.  The image background color is returned if an error occurs.
%  If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
%  The format of the GetOneVirtualPixel() method is:
%
%      MagickBooleanType GetOneVirtualPixel(const Image image,const ssize_t x,
%        const ssize_t y,Quantum *pixel,ExceptionInfo exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Zero the output first so all MaxPixelChannels slots are defined. */
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  /* Delegate to the installed handler if one exists in the method table. */
  if (cache_info->methods.get_one_virtual_pixel_from_handler !=
       (GetOneVirtualPixelFromHandler) NULL)
    return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  return(CopyPixel(image,p,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t O n e V i r t u a l P i x e l F r o m C a c h e                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixelFromCache() returns a single virtual pixel at the
%  specified (x,y) location.  The image background color is returned if an
%  error occurs.
%
%  The format of the GetOneVirtualPixelFromCache() method is:
%
%      MagickBooleanType GetOneVirtualPixelFromCache(const Image image,
%        const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
%        Quantum *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* Zero the output first so all MaxPixelChannels slots are defined. */
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  /* Fetch a 1x1 virtual region through this thread's nexus. */
  p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  return(CopyPixel(image,p,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e V i r t u a l P i x e l I n f o                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.  If
%  you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
%  The format of the GetOneVirtualPixelInfo() method is:
%
%      MagickBooleanType GetOneVirtualPixelInfo(const Image image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelInfo *pixel,ExceptionInfo exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y:  these values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  register const Quantum
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* Initialize the PixelInfo to the image defaults before the lookup. */
  GetPixelInfo(image,pixel);
  p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (p == (const Quantum *) NULL)
    return(MagickFalse);
  /* Convert the raw Quantum channels into the caller's PixelInfo. */
  GetPixelInfoPixel(image,p,pixel);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e C o l o r s p a c e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheColorspace() returns the class type of the pixel cache.
%
%  The format of the GetPixelCacheColorspace() method is:
%
%      Colorspace GetPixelCacheColorspace(Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
% */
MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->colorspace);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e F i l e n a m e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheFilename() returns the filename associated with the pixel
%  cache.
%
%  The format of the GetPixelCacheFilename() method is:
%
%      const char *GetPixelCacheFilename(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Name of the backing store (e.g. the disk cache file), not the image. */
  return(cache_info->cache_filename);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e M e t h o d s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheMethods() initializes the CacheMethods structure.
%
%  The format of the GetPixelCacheMethods() method is:
%
%      void GetPixelCacheMethods(CacheMethods *cache_methods)
%
%  A description of each parameter follows:
%
%    o cache_methods: Specifies a pointer to a CacheMethods structure.
% */
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  assert(cache_methods != (CacheMethods *) NULL);
  /* Zero the table first so unset handlers are reliably NULL. */
  (void) ResetMagickMemory(cache_methods,0,sizeof(*cache_methods));
  /* Install the default in-process cache handlers defined in this file. */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e N e x u s E x t e n t                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheNexusExtent() returns the extent of the pixels associated
%  corresponding with the last call to SetPixelCacheNexusPixels() or
%  GetPixelCacheNexusPixels().
%
%  The format of the GetPixelCacheNexusExtent() method is:
%
%      MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o nexus_info: the nexus info.
% */
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    extent;

  /*
    Use the (Cache) NULL form, consistent with every other cache accessor in
    this file (the bare NULL here was the lone exception).
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    An empty nexus region means no region has been selected yet; report the
    extent of the whole cache in that case.
  */
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent == 0)
    return((MagickSizeType) cache_info->columns*cache_info->rows);
  return(extent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e P i x e l s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCachePixels() returns the pixels associated with the specified image.
%
%  The format of the GetPixelCachePixels() method is:
%
%      void *GetPixelCachePixels(Image *image,MagickSizeType *length,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o length: the pixel cache length.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=cache_info->length;
  /* Direct pixel access only exists for heap- and mmap-backed caches. */
  if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache))
    return((void *) NULL);
  return((void *) cache_info->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e S t o r a g e C l a s s                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
%  The format of the GetPixelCacheStorageClass() method is:
%
%      ClassType GetPixelCacheStorageClass(Cache cache)
%
%  A description of each parameter follows:
%
%    o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
%    o cache: the pixel cache.
%
*/
MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->storage_class);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e T i l e S i z e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheTileSize() returns the pixel cache tile size.
%
%  The format of the GetPixelCacheTileSize() method is:
%
%      void GetPixelCacheTileSize(const Image *image,size_t *width,
%        size_t *height)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the optimized cache tile width in pixels.
%
%    o height: the optimized cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Size a square tile to a fixed byte budget per row: 2KB for in-memory
    caches, 4x that (8KB) for disk caches to amortize I/O per tile.
  */
  *width=2048UL/(cache_info->number_channels*sizeof(Quantum));
  if (GetImagePixelCacheType(image) == DiskCache)
    *width=8192UL/(cache_info->number_channels*sizeof(Quantum));
  *height=(*width);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e V i r t u a l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
%  pixel cache.  A virtual pixel is any pixel access that is outside the
%  boundaries of the image cache.
%
%  The format of the GetPixelCacheVirtualMethod() method is:
%
%      VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */
MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->virtual_pixel_method);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualMetacontentFromCache() returns the meta-content corresponding with
%  the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
%  The format of the GetVirtualMetacontentFromCache() method is:
%
%      void *GetVirtualMetacontentFromCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static const void *GetVirtualMetacontentFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const void
    *magick_restrict metacontent;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* Meta-content of whatever region this thread's nexus last selected. */
  metacontent=GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]);
  return(metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualMetacontentFromNexus() returns the meta-content for the specified
%  cache nexus.
%
%  The format of the GetVirtualMetacontentFromNexus() method is:
%
%      const void *GetVirtualMetacontentFromNexus(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* No meta-content is available until the cache has been opened. */
  if (cache_info->storage_class == UndefinedClass)
    return((void *) NULL);
  return(nexus_info->metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l M e t a c o n t e n t                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualMetacontent() returns the virtual metacontent corresponding with
%  the last call to QueueAuthenticPixels() or GetVirtualPixels().  NULL is
%  returned if the meta-content are not available.
%
%  The format of the GetVirtualMetacontent() method is:
%
%      const void *GetVirtualMetacontent(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const void
    *magick_restrict metacontent;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Guard the handler pointer before dispatching through it, consistent with
    GetAuthenticPixels(), GetOneAuthenticPixel() and GetOneVirtualPixel();
    the previous code dereferenced the table slot unconditionally and would
    crash if a caller installed a method table with this handler unset.
  */
  if (cache_info->methods.get_virtual_metacontent_from_handler !=
       (GetVirtualMetacontentFromHandler) NULL)
    {
      metacontent=cache_info->methods.get_virtual_metacontent_from_handler(
        image);
      if (metacontent != (void *) NULL)
        return(metacontent);
    }
  /* Fall back to the default per-thread nexus implementation. */
  assert(id < (int) cache_info->number_threads);
  metacontent=GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]);
  return(metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l P i x e l s F r o m N e x u s                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsFromNexus() gets virtual pixels from the in-memory or disk
%  pixel cache as defined by the geometry parameters.  A pointer to the pixels
%  is returned if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetVirtualPixelsFromNexus() method is:
%
%      Quantum *GetVirtualPixelsFromNexus(const Image *image,
%        const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
%        const size_t columns,const size_t rows,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to acquire.
%
%    o exception: return any errors or warnings in this structure.
% */ static ssize_t DitherMatrix[64] = { 0, 48, 12, 60, 3, 51, 15, 63, 32, 16, 44, 28, 35, 19, 47, 31, 8, 56, 4, 52, 11, 59, 7, 55, 40, 24, 36, 20, 43, 27, 39, 23, 2, 50, 14, 62, 1, 49, 13, 61, 34, 18, 46, 30, 33, 17, 45, 29, 10, 58, 6, 54, 9, 57, 5, 53, 42, 26, 38, 22, 41, 25, 37, 21 }; static inline ssize_t DitherX(const ssize_t x,const size_t columns) { ssize_t index; index=x+DitherMatrix[x & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) columns) return((ssize_t) columns-1L); return(index); } static inline ssize_t DitherY(const ssize_t y,const size_t rows) { ssize_t index; index=y+DitherMatrix[y & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) rows) return((ssize_t) rows-1L); return(index); } static inline ssize_t EdgeX(const ssize_t x,const size_t columns) { if (x < 0L) return(0L); if (x >= (ssize_t) columns) return((ssize_t) (columns-1)); return(x); } static inline ssize_t EdgeY(const ssize_t y,const size_t rows) { if (y < 0L) return(0L); if (y >= (ssize_t) rows) return((ssize_t) (rows-1)); return(y); } static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns) { return((ssize_t) (columns*GetPseudoRandomValue(random_info))); } static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows) { return((ssize_t) (rows*GetPseudoRandomValue(random_info))); } static inline MagickModulo VirtualPixelModulo(const ssize_t offset, const size_t extent) { MagickModulo modulo; /* Compute the remainder of dividing offset by extent. It returns not only the quotient (tile the offset falls in) but also the positive remainer within that tile such that 0 <= remainder < extent. This method is essentially a ldiv() using a floored modulo division rather than the normal default truncated modulo division. 
*/ modulo.quotient=offset/(ssize_t) extent; if (offset < 0L) modulo.quotient--; modulo.remainder=offset-modulo.quotient*(ssize_t) extent; return(modulo); } MagickPrivate const Quantum *GetVirtualPixelsFromNexus(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickOffsetType offset; MagickSizeType length, number_pixels; NexusInfo **magick_restrict virtual_nexus; Quantum *magick_restrict pixels, virtual_pixel[MaxPixelChannels]; RectangleInfo region; register const Quantum *magick_restrict p; register const void *magick_restrict r; register Quantum *magick_restrict q; register ssize_t i, u; register unsigned char *magick_restrict s; ssize_t v; void *magick_restrict virtual_metacontent; /* Acquire pixels. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return((const Quantum *) NULL); #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif region.x=x; region.y=y; region.width=columns; region.height=rows; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,nexus_info, exception); if (pixels == (Quantum *) NULL) return((const Quantum *) NULL); q=pixels; offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+ nexus_info->region.width-1L; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels)) if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) && (y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows)) { MagickBooleanType status; /* Pixel 
request is inside cache extents. */ if (nexus_info->authentic_pixel_cache != MagickFalse) return(q); status=ReadPixelCachePixels(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); if (cache_info->metacontent_extent != 0) { status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); } return(q); } /* Pixel request is outside cache extents. */ s=(unsigned char *) nexus_info->metacontent; virtual_nexus=AcquirePixelCacheNexus(1); if (virtual_nexus == (NexusInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "UnableToGetCacheNexus","`%s'",image->filename); return((const Quantum *) NULL); } (void) ResetMagickMemory(virtual_pixel,0,cache_info->number_channels* sizeof(*virtual_pixel)); virtual_metacontent=(void *) NULL; switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: case EdgeVirtualPixelMethod: case CheckerTileVirtualPixelMethod: case HorizontalTileVirtualPixelMethod: case VerticalTileVirtualPixelMethod: { if (cache_info->metacontent_extent != 0) { /* Acquire a metacontent buffer. 
*/ virtual_metacontent=(void *) AcquireQuantumMemory(1, cache_info->metacontent_extent); if (virtual_metacontent == (void *) NULL) { virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1); (void) ThrowMagickException(exception,GetMagickModule(), CacheError,"UnableToGetCacheNexus","`%s'",image->filename); return((const Quantum *) NULL); } (void) ResetMagickMemory(virtual_metacontent,0, cache_info->metacontent_extent); } switch (virtual_pixel_method) { case BlackVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case GrayVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange/2, virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case TransparentVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,TransparentAlpha,virtual_pixel); break; } case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } default: { SetPixelRed(image,ClampToQuantum(image->background_color.red), virtual_pixel); SetPixelGreen(image,ClampToQuantum(image->background_color.green), virtual_pixel); SetPixelBlue(image,ClampToQuantum(image->background_color.blue), virtual_pixel); SetPixelBlack(image,ClampToQuantum(image->background_color.black), virtual_pixel); SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha), virtual_pixel); break; } } break; } default: break; } for (v=0; v < (ssize_t) rows; v++) { ssize_t y_offset; y_offset=y+v; if ((virtual_pixel_method == EdgeVirtualPixelMethod) || (virtual_pixel_method == UndefinedVirtualPixelMethod)) 
y_offset=EdgeY(y_offset,cache_info->rows); for (u=0; u < (ssize_t) columns; u+=length) { ssize_t x_offset; x_offset=x+u; length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u); if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) || ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) || (length == 0)) { MagickModulo x_modulo, y_modulo; /* Transfer a single pixel. */ length=(MagickSizeType) 1; switch (virtual_pixel_method) { case EdgeVirtualPixelMethod: default: { p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns), EdgeY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case RandomVirtualPixelMethod: { if (cache_info->random_info == (RandomInfo *) NULL) cache_info->random_info=AcquireRandomInfo(); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, RandomX(cache_info->random_info,cache_info->columns), RandomY(cache_info->random_info,cache_info->rows),1UL,1UL, *virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case DitherVirtualPixelMethod: { p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, DitherX(x_offset,cache_info->columns), DitherY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case TileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case MirrorVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); if ((x_modulo.quotient & 0x01) == 1L) x_modulo.remainder=(ssize_t) cache_info->columns- x_modulo.remainder-1L; 
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if ((y_modulo.quotient & 0x01) == 1L) y_modulo.remainder=(ssize_t) cache_info->rows- y_modulo.remainder-1L; p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case HorizontalTileEdgeVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL, *virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case VerticalTileEdgeVirtualPixelMethod: { y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL, *virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { p=virtual_pixel; r=virtual_metacontent; break; } case CheckerTileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L) { p=virtual_pixel; r=virtual_metacontent; break; } p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case HorizontalTileVirtualPixelMethod: { if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) { p=virtual_pixel; r=virtual_metacontent; break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); 
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } case VerticalTileVirtualPixelMethod: { if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) { p=virtual_pixel; r=virtual_metacontent; break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); break; } } if (p == (const Quantum *) NULL) break; (void) memcpy(q,p,(size_t) length*cache_info->number_channels* sizeof(*p)); q+=cache_info->number_channels; if ((s != (void *) NULL) && (r != (const void *) NULL)) { (void) memcpy(s,r,(size_t) cache_info->metacontent_extent); s+=cache_info->metacontent_extent; } continue; } /* Transfer a run of pixels. */ p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x_offset,y_offset, (size_t) length,1UL,*virtual_nexus,exception); if (p == (const Quantum *) NULL) break; r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus); (void) memcpy(q,p,(size_t) length*cache_info->number_channels*sizeof(*p)); q+=length*cache_info->number_channels; if ((r != (void *) NULL) && (s != (const void *) NULL)) { (void) memcpy(s,r,(size_t) length); s+=length*cache_info->metacontent_extent; } } if (u < (ssize_t) columns) break; } /* Free resources. 
*/ if (virtual_metacontent != (void *) NULL) virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent); virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1); if (v < (ssize_t) rows) return((const Quantum *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel % cache as defined by the geometry parameters. A pointer to the pixels % is returned if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetVirtualPixelCache() method is: % % const Quantum *GetVirtualPixelCache(const Image *image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
% */ static const Quantum *GetVirtualPixelCache(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,columns,rows, cache_info->nexus_info[id],exception); return(p); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t V i r t u a l P i x e l Q u e u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelQueue() returns the virtual pixels associated corresponding % with the last call to QueueAuthenticPixels() or GetVirtualPixels(). % % The format of the GetVirtualPixelQueue() method is: % % const Quantum *GetVirtualPixelQueue(const Image image) % % A description of each parameter follows: % % o image: the image. 
% */ MagickExport const Quantum *GetVirtualPixelQueue(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_virtual_pixels_handler != (GetVirtualPixelsHandler) NULL) return(cache_info->methods.get_virtual_pixels_handler(image)); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t V i r t u a l P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixels() returns an immutable pixel region. If the % region is successfully accessed, a pointer to it is returned, otherwise % NULL is returned. The returned pointer may point to a temporary working % copy of the pixels or it may point to the original pixels in memory. % Performance is maximized if the selected region is part of one row, or one % or more full rows, since there is opportunity to access the pixels in-place % (without a copy) if the image is in memory, or in a memory-mapped file. The % returned pointer must *never* be deallocated by the user. % % Pixels accessed via the returned pointer represent a simple array of type % Quantum. If the image type is CMYK or the storage class is PseudoClass, % call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to % access the meta-content (of type void) corresponding to the the % region. % % If you plan to modify the pixels, use GetAuthenticPixels() instead. % % Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread- % safe. 
In a threaded environment, use GetCacheViewVirtualPixels() or % GetCacheViewAuthenticPixels() instead. % % The format of the GetVirtualPixels() method is: % % const Quantum *GetVirtualPixels(const Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport const Quantum *GetVirtualPixels(const Image *image, const ssize_t x,const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL) return(cache_info->methods.get_virtual_pixel_handler(image, GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception)); assert(id < (int) cache_info->number_threads); p=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y, columns,rows,cache_info->nexus_info[id],exception); return(p); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsCache() returns the pixels associated corresponding with the % last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualPixelsCache() method is: % % Quantum *GetVirtualPixelsCache(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ static const Quantum *GetVirtualPixelsCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsNexus() returns the pixels associated with the specified % cache nexus. % % The format of the GetVirtualPixelsNexus() method is: % % const Quantum *GetVirtualPixelsNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the colormap pixels. % */ MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->storage_class == UndefinedClass) return((Quantum *) NULL); return((const Quantum *) nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + O p e n P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpenPixelCache() allocates the pixel cache. This includes defining the cache % dimensions, allocating space for the image pixels and optionally the % metacontent, and memory mapping the cache if it is disk based. The cache % nexus array is initialized as well. 
%
%  The format of the OpenPixelCache() method is:
%
%      MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Open (or reuse) the disk file that backs this pixel cache, honoring the
  requested MapMode.  Returns MagickTrue on success.
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /* try to create exclusively first; fall back to opening existing */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);  /* replace previously open fd */
  cache_info->file=file;
  return(MagickTrue);
}

/*
  Write `length` bytes of `buffer` to the cache file at `offset`, retrying
  partial writes and EINTR.  Returns the number of bytes written (which is
  less than `length` on a hard error).
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)  /* retry only interrupted writes */
          break;
      }
  }
  return(i);
}

/*
  Ensure the on-disk cache file is at least `length` bytes long, extending it
  sparsely (write one byte at length-1) when needed; the file position is
  rewound to the start on success.
*/
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];

      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* reject lengths that overflow a signed file offset */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;
  else
    {
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      if (cache_info->synchronize != MagickFalse)
        (void) posix_fallocate(cache_info->file,offset+1,extent-offset);
#endif
    }
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}

/*
  Allocate the pixel cache backing store, trying in order: ping (no pixels),
  heap/anonymous-mapped memory, a distributed cache server, a memory-mapped
  disk file, and finally a plain disk file.  Any previous cache contents are
  cloned into the new store when the image is opened for writing.
*/
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MagickPathExtent],
    message[MagickPathExtent];

  const char
    *type;

  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  size_t
    columns,
    packet_size;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;

      /*
        Does the security policy require anonymous mapping for pixel cache?
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if ((AcquireMagickResource(WidthResource,image->columns) == MagickFalse) ||
      (AcquireMagickResource(HeightResource,image->rows) == MagickFalse))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  /* keep a copy of the existing cache so its pixels can be cloned/released */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]",
    image->filename,(double) GetImageIndexInList(image));
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->alpha_trait=image->alpha_trait;
  cache_info->read_mask=image->read_mask;
  cache_info->write_mask=image->write_mask;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  InitializePixelChannelMap(image);
  cache_info->number_channels=GetPixelChannels(image);
  (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
    sizeof(*image->channel_map));
  cache_info->metacontent_extent=image->metacontent_extent;
  cache_info->mode=mode;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=cache_info->number_channels*sizeof(Quantum);
  if (image->metacontent_extent != 0)
    packet_size+=cache_info->metacontent_extent;
  length=number_pixels*packet_size;
  /* round-trip check guards against arithmetic overflow of the extent */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /* ping mode: record geometry only, allocate no pixels */
      cache_info->storage_class=image->storage_class;
      cache_info->colorspace=image->colorspace;
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,cache_info->length);
  if (cache_info->mode == PersistMode)
    status=MagickFalse;  /* persistent caches always go to disk */
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if ((status != MagickFalse) && (length == (MagickSizeType) ((size_t) length)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (((cache_info->type == UndefinedCache) && (status != MagickFalse)) ||
          (cache_info->type == MemoryCache))
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(Quantum *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (Quantum *) NULL)
            cache_info->pixels=source_info.pixels;
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->type=MemoryCache;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  number_pixels*cache_info->number_channels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->mapped != MagickFalse ?
                    "Anonymous" : "Heap",type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(status == 0 ? MagickFalse : MagickTrue);
            }
        }
      RelinquishMagickResource(MemoryResource,cache_info->length);
    }
  /*
    Create pixel cache on disk.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if ((status == MagickFalse) || (cache_info->type == DistributedCache))
    {
      DistributeCacheInfo
        *server_info;

      /* disk quota exhausted (or requested): try a distributed cache server */
      if (cache_info->type == DistributedCache)
        RelinquishMagickResource(DiskResource,cache_info->length);
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    GetDistributeCacheFile((DistributeCacheInfo *)
                    cache_info->server_info),type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(status == 0 ? MagickFalse : MagickTrue);
            }
        }
      RelinquishMagickResource(DiskResource,cache_info->length);
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      /* writing a fresh cache: drop the previous disk file and its name */
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      RelinquishMagickResource(DiskResource,cache_info->length);
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if (length != (MagickSizeType) ((size_t) length))
    cache_info->type=DiskCache;  /* too large to address: cannot memory-map */
  else
    {
      status=AcquireMagickResource(MapResource,cache_info->length);
      if ((status == MagickFalse) && (cache_info->type != MapCache) &&
          (cache_info->type != MemoryCache))
        {
          status=MagickTrue;
          cache_info->type=DiskCache;
        }
      else
        {
          status=MagickTrue;
          cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
            cache_info->offset,(size_t) cache_info->length);
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /* mmap failed: fall back to plain disk I/O */
              cache_info->type=DiskCache;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create file-backed memory-mapped pixel cache.
              */
              (void) ClosePixelCacheOnDisk(cache_info);
              cache_info->type=MapCache;
              cache_info->mapped=MagickTrue;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  number_pixels*cache_info->number_channels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    cache_info->file,type,(double) cache_info->columns,(double)
                    cache_info->rows,(double) cache_info->number_channels,
                    format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(status == 0 ? MagickFalse : MagickTrue);
            }
        }
      RelinquishMagickResource(MapResource,cache_info->length);
    }
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r s i s t P i x e l C a c h e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PersistPixelCache() attaches to or initializes a persistent pixel cache.  A
%  persistent pixel cache is one that resides on disk and is not destroyed
%  when the program exits.
%
%  The format of the PersistPixelCache() method is:
%
%      MagickBooleanType PersistPixelCache(Image *image,const char *filename,
%        const MagickBooleanType attach,MagickOffsetType *offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o filename: the persistent pixel cache filename.
%
%    o attach: A value other than zero initializes the persistent pixel cache.
%
%    o initialize: A value other than zero initializes the persistent pixel
%      cache.
%
%    o offset: the offset in the persistent cache to store pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=DiskCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /* advance the caller's offset past this cache, page-aligned */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache.
  */
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->read_mask=cache_info->read_mask;
  clone_info->write_mask=cache_info->write_mask;
  clone_info->rows=cache_info->rows;
  clone_info->columns=cache_info->columns;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.
%  This region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelCacheNexus() method is:
%
%      Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        const MagickBooleanType clone,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to set.
%
%    o clone: clone the pixel cache.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  Quantum
    *magick_restrict pixels;

  RectangleInfo
    region;

  /*
    Validate pixel cache geometry: the requested origin must lie inside the
    cache and the far corner must not overflow past the pixel count.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)  /* signed overflow guard on the origin offset */
    return((Quantum *) NULL);
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /* offset of the region's bottom-right pixel; must stay inside the cache */
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache.
  */
  region.x=x;
  region.y=y;
  region.width=columns;
  region.height=rows;
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,&region,nexus_info,
    exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
+   Q u e u e A u t h e n t i c P i x e l s C a c h e                         %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelsCache() allocates a region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelsCache() method is:
%
%      Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
% % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u e u e A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QueueAuthenticPixels() queues a mutable pixel region. If the region is % successfully initialized a pointer to a Quantum array representing the % region is returned, otherwise NULL is returned. The returned pointer may % point to a temporary working buffer for the pixels or it may point to the % final location of the pixels in memory. % % Write-only access means that any existing pixel values corresponding to % the region are ignored. This is useful if the initial image is being % created from scratch, or if the existing pixel values are to be % completely replaced without need to refer to their pre-existing values. % The application is free to read and write the pixel buffer returned by % QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not % initialize the pixel array values. Initializing pixel array values is the % application's responsibility. 
%
%  Performance is maximized if the selected region is part of one row, or
%  one or more full rows, since then there is opportunity to access the
%  pixels in-place (without a copy) if the image is in memory, or in a
%  memory-mapped file.  The returned pointer must *never* be deallocated
%  by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
%  obtain the meta-content (of type void) corresponding to the region.
%  Once the Quantum (and/or Quantum) array has been updated, the
%  changes must be saved back to the underlying image using
%  SyncAuthenticPixels() or they may be lost.
%
%  The format of the QueueAuthenticPixels() method is:
%
%      Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();  /* selects this thread's private nexus */

  Quantum
    *magick_restrict pixels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* honor an installed queue handler (cache method override) if present */
  if (cache_info->methods.queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    {
      pixels=cache_info->methods.queue_authentic_pixels_handler(image,x,y,
        columns,rows,exception);
      return(pixels);
    }
  assert(id < (int) cache_info->number_threads);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
+   R e a d P i x e l C a c h e M e t a c o n t e n t                         %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPixelCacheMetacontent() reads metacontent from the specified region of
%  the pixel cache.
%
%  The format of the ReadPixelCacheMetacontent() method is:
%
%      MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to read the metacontent.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Read `length` bytes at `offset` from the cache's file descriptor into
  `buffer`.  Returns the number of bytes actually read (may be short on
  error); retries on EINTR.  Without pread(), falls back to lseek()+read(),
  which is not safe for concurrent readers of the same descriptor.
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)  /* retry interrupted reads, bail on real errors */
          break;
      }
  }
  return(i);
}

/*
  Read the nexus region's metacontent from the pixel cache (memory, memory-
  mapped, disk, or distributed backing store) into nexus_info->metacontent.
  Returns MagickTrue on success; MagickFalse if the cache carries no
  metacontent or a read fails.
*/
static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register ssize_t
    y;

  register unsigned char
    *magick_restrict q;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* nexus points directly at the cache; nothing to copy */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict p;

      /*
        Read meta-content from memory; full-width regions collapse to a
        single contiguous copy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk; the metacontent is stored after all the
        pixel data in the cache file.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /* extent is repurposed: total pixels, to skip past the pixel section */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read metacontent from distributed cache, one row at a time unless the
        region spans full rows and fits in one buffer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* early loop exit above means a short read occurred */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
+   R e a d P i x e l C a c h e P i x e l s                                   %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPixelCachePixels() reads pixels from the specified region of the pixel
%  cache.
%
%  The format of the ReadPixelCachePixels() method is:
%
%      MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to read the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Read the nexus region's pixels from the pixel cache (memory, memory-mapped,
  disk, or distributed backing store) into nexus_info->pixels.  Overflow-
  checks the offset/length arithmetic before any I/O.  Returns MagickTrue on
  success, MagickFalse on arithmetic overflow or a short read.
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register Quantum
    *magick_restrict q;

  register ssize_t
    y;

  size_t
    number_channels,
    rows;

  /* nexus points directly at the cache; nothing to copy */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  /* division check detects overflow in the multiplication above */
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict p;

      /*
        Read pixels from memory; full-width regions collapse to a single
        contiguous copy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+offset*cache_info->number_channels;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk, one region row per read.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache, one row at a time unless the
        region spans full rows and fits in one buffer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* early loop exit above means a short read occurred */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
+   R e f e r e n c e P i x e l C a c h e                                     %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReferencePixelCache() increments the reference count associated with the
%  pixel cache returning a pointer to the cache.
%
%  The format of the ReferencePixelCache method is:
%
%      Cache ReferencePixelCache(Cache cache_info)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* reference count is guarded by the cache's own semaphore */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
+   R e s e t P i x e l C a c h e C h a n n e l s                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetPixelCacheChannels() resets the pixel cache channels.
%
%  The format of the ResetPixelCacheChannels method is:
%
%      void ResetPixelCacheChannels(Image *)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* re-derive the channel count from the image's current pixel traits */
  cache_info->number_channels=GetPixelChannels(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
+   R e s e t P i x e l C a c h e E p o c h                                   %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetPixelCacheEpoch() resets the pixel cache epoch.
%
%  The format of the ResetPixelCacheEpoch method is:
%
%      void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  /* cache_epoch is a file-scope counter defined elsewhere in this file */
  cache_epoch=0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
+   S e t P i x e l C a c h e M e t h o d s                                   %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%  Only non-NULL handlers in cache_methods replace the cache's current
%  handlers.
%
%  The format of the SetPixelCacheMethods() method is:
%
%      SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    NOTE(review): this guard reads the handler from cache_info->methods while
    the authentic variant below reads it from cache_methods — the asymmetry
    looks unintentional; confirm against upstream before changing.
  */
  get_one_virtual_pixel_from_handler=
    cache_info->methods.get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
+   S e t P i x e l C a c h e N e x u s P i x e l s                           %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheNexusPixels() defines the region of the cache for the
%  specified cache nexus.
%
%  The format of the SetPixelCacheNexusPixels() method is:
%
%      Quantum SetPixelCacheNexusPixels(const CacheInfo *cache_info,
%        const MapMode mode,const RectangleInfo *region,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o region: A pointer to the RectangleInfo structure that defines the
%      region of this particular cache nexus.
%
%    o nexus_info: the cache nexus to set.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Allocate nexus staging storage of nexus_info->length bytes, either as
  aligned heap memory (zero-filled) or as an anonymous memory map, depending
  on the cache_anonymous_memory setting.  Returns MagickFalse if the length
  does not fit in a size_t or the allocation fails.
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  if (nexus_info->length != (MagickSizeType) ((size_t) nexus_info->length))
    return(MagickFalse);
  if (cache_anonymous_memory <= 0)
    {
      nexus_info->mapped=MagickFalse;
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) nexus_info->length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) ResetMagickMemory(nexus_info->cache,0,(size_t)
          nexus_info->length);
    }
  else
    {
      nexus_info->mapped=MagickTrue;
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
        nexus_info->length);
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  return(MagickTrue);
}

/*
  Report whether the nexus pixel pointer aliases the in-core cache pixels
  (authentic) rather than a private staging buffer.
*/
static inline MagickBooleanType IsPixelCacheAuthentic(
  const CacheInfo *magick_restrict cache_info,
  const NexusInfo *magick_restrict nexus_info)
{
  MagickBooleanType
    status;

  MagickOffsetType
    offset;

  /*
    Does nexus pixels point directly to in-core cache pixels or is it
    buffered?
  */
  if (cache_info->type == PingCache)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  status=nexus_info->pixels == (cache_info->pixels+offset*
    cache_info->number_channels) ? MagickTrue : MagickFalse;
  return(status);
}

/*
  Hint the CPU to prefetch the nexus pixels for the upcoming access mode
  (read vs. write).
*/
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  if (mode == ReadMode)
    {
      MagickCachePrefetch((unsigned char *) nexus_info->pixels,0,1);
      return;
    }
  MagickCachePrefetch((unsigned char *) nexus_info->pixels,1,1);
}

static Quantum *SetPixelCacheNexusPixels(const CacheInfo *cache_info,
  const MapMode mode,const RectangleInfo *region,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  if ((region->width == 0) || (region->height == 0))
    return((Quantum *) NULL);
  nexus_info->region=(*region);
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  if (number_pixels == 0)
    return((Quantum *) NULL);
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    {
      ssize_t
        x,
        y;

      /* x,y: the region's far (bottom-right) corner, inclusive */
      x=nexus_info->region.x+(ssize_t) nexus_info->region.width-1;
      y=nexus_info->region.y+(ssize_t) nexus_info->region.height-1;
      /*
        Direct access is possible only when the region is fully inside the
        cache and is either a single row or a run of whole rows starting at
        column 0 (i.e. contiguous in cache memory).
      */
      if (((nexus_info->region.x >= 0) && (x < (ssize_t) cache_info->columns) &&
           (nexus_info->region.y >= 0) && (y < (ssize_t) cache_info->rows)) &&
          ((nexus_info->region.height == 1UL) || ((nexus_info->region.x == 0) &&
           ((nexus_info->region.width == cache_info->columns) ||
            ((nexus_info->region.width % cache_info->columns) == 0)))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
            nexus_info->region.x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          nexus_info->authentic_pixel_cache=IsPixelCacheAuthentic(cache_info,
            nexus_info);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  length=number_pixels*cache_info->number_channels*sizeof(Quantum);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  if (nexus_info->cache == (Quantum *) NULL)
    {
      nexus_info->length=length;
      status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
      if (status == MagickFalse)
        {
          nexus_info->length=0;
          return((Quantum *) NULL);
        }
    }
  else
    if (nexus_info->length < length)
      {
        /* existing staging buffer too small; grow it */
        RelinquishCacheNexusPixels(nexus_info);
        nexus_info->length=length;
        status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          {
            nexus_info->length=0;
            return((Quantum *) NULL);
          }
      }
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  if (cache_info->metacontent_extent != 0)
    nexus_info->metacontent=(void *) (nexus_info->pixels+number_pixels*
      cache_info->number_channels);
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  nexus_info->authentic_pixel_cache=IsPixelCacheAuthentic(cache_info,
    nexus_info);
  return(nexus_info->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%   S e t P i x e l C a c h e V i r t u a l M e t h o d                       %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
%  pixel cache and returns the previous setting.
%  A virtual pixel is any pixel access that is outside the boundaries of the
%  image cache.
%
%  The format of the SetPixelCacheVirtualMethod() method is:
%
%      VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
%        const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: choose the type of virtual pixel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Set every pixel's alpha channel to `alpha` and mark the image as blended.
  Used below when a virtual-pixel method requires an alpha channel that the
  image does not yet carry.
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);  /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  /* some methods need side effects (alpha channel, colorspace) on the image */
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
            (image->alpha_trait == UndefinedPixelTrait))
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace(image,sRGBColorspace,exception);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        break;
      }
      default:
        break;
    }
  return(method);  /* previous virtual-pixel method */
}

#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
+   S y n c A u t h e n t i c O p e n C L B u f f e r                         %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
%  been completed and updates the host memory.
%
%  The format of the SyncAuthenticOpenCLBuffer() method is:
%
%      void SyncAuthenticOpenCLBuffer(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/

/*
  CopyOpenCLBuffer() waits for pending OpenCL work on a memory-resident
  cache and refreshes the host copy of the OpenCL cache info.  No-op for
  non-memory caches or when no OpenCL cache info is attached.
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->type != MemoryCache) ||
      (cache_info->opencl == (MagickCLCacheInfo) NULL))
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->opencl=(MagickCLCacheInfo) CopyMagickCLCacheInfo(
    cache_info->opencl);
  UnlockSemaphoreInfo(cache_info->semaphore);
}

MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  cache_info=(CacheInfo *) image->cache;
  CopyOpenCLBuffer(cache_info);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c   A u t h e n t i c   P i x e l   C a c h e   N e x u s           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
%  in-memory or disk cache.  The method returns MagickTrue if the pixel region
%  is synced, otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelCacheNexus() method is:
%
%      MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to sync.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      /*
        Nexus points directly at the cache pixels; nothing to copy, just
        mark the image as modified.
      */
      image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  /* metacontent (if any) must also land; its failure overrides pixel status */
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if (status != MagickFalse)
    image->taint=MagickTrue;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c   A u t h e n t i c   P i x e l   C a c h e                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
%  or disk cache.  The method returns MagickTrue if the pixel region is synced,
%  otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelsCache() method is:
%
%      MagickBooleanType SyncAuthenticPixelsCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  SyncAuthenticPixelsCache() flushes the calling thread's cache nexus back
  to the in-memory or disk pixel cache and reports whether the sync
  succeeded.
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* each OpenMP thread owns its own nexus; sync that one only */
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c   A u t h e n t i c   P i x e l s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
%  The method returns MagickTrue if the pixel region is flushed, otherwise
%  MagickFalse.
%
%  The format of the SyncAuthenticPixels() method is:
%
%      MagickBooleanType SyncAuthenticPixels(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* an installed handler (e.g. a custom cache) takes precedence */
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    {
      status=cache_info->methods.sync_authentic_pixels_handler(image,
        exception);
      return(status);
    }
  assert(id < (int) cache_info->number_threads);
  status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c   I m a g e   P i x e l   C a c h e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
%  The method returns MagickTrue if the pixel region is flushed, otherwise
%  MagickFalse.
%
%  The format of the SyncImagePixelCache() method is:
%
%      MagickBooleanType SyncImagePixelCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  /* GetImagePixelCache() with clone==MagickTrue performs the actual sync */
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  return(cache_info == (CacheInfo *) NULL ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   W r i t e   P i x e l   C a c h e   M e t a c o n t e n t                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCacheMetacontent() writes the meta-content to the specified
%  region of the pixel cache.
%
%  The format of the WritePixelCacheMetacontent() method is:
%
%      MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the meta-content.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const unsigned char
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache; nothing to write */
  /* offset is in pixels; length is one nexus row of metacontent in bytes */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.  Full-width regions collapse to
        a single contiguous copy when the extent fits in size_t.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /*
        extent is reused here as the pixel-plane area: on disk the
        metacontent plane follows all pixel data.
      */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache, one row per request unless
        the whole region is contiguous and small enough for one request.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* an early break above leaves y < rows, signalling a short write */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   W r i t e   P i x e l   C a c h e   P i x e l s                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCachePixels() writes image pixels to the specified region of the
%  pixel cache.
%
%  The format of the WritePixelCachePixels() method is:
%
%      MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const Quantum
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache; nothing to write */
  /* offset is in pixels; length is one nexus row of channel data in bytes */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.  Full-width regions collapse to a single
        contiguous copy when the extent fits in size_t.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+offset*cache_info->number_channels;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->columns*cache_info->number_channels;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache, one row per request unless the
        whole region is contiguous and small enough for one request.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* an early break above leaves y < rows, signalling a short write */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}