source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
sieveMulticore.c | /*
Tempos de execução:
Versão sequencial:
real 0m3.005s
user 0m2.919s
sys 0m0.080s
real 0m3.006s
user 0m2.930s
sys 0m0.068s
real 0m3.028s
user 0m2.951s
sys 0m0.064s
real 0m3.007s
user 0m2.926s
sys 0m0.076s
real 0m3.006s
user 0m2.920s
sys 0m0.080s
Versão paralela multicore:
real 0m2.579s
user 0m9.820s
sys 0m0.080s
real 0m2.573s
user 0m9.845s
sys 0m0.068s
real 0m2.579s
user 0m9.838s
sys 0m0.080s
real 0m2.576s
user 0m9.830s
sys 0m0.080s
real 0m2.580s
user 0m9.840s
sys 0m0.084s
Versão paralela para GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <math.h>
#include <omp.h>
int sieveOfEratosthenes(int n)
{
// Create a boolean array "prime[0..n]" and initialize
// all entries it as true. A value in prime[i] will
// finally be false if i is Not a prime, else true.
int primes = 0;
bool *prime = (bool*) malloc((n+1)*sizeof(bool));
int sqrt_n = sqrt(n);
memset(prime, true,(n+1)*sizeof(bool));
int i, p;
#pragma omp parallel for schedule (guided)
for (p=2; p <= sqrt_n; p++)
{
// If prime[p] is not changed, then it is a prime
if (prime[p] == true)
{
// Update all multiples of p
#pragma omp parallel for
for(i=p*2; i<=n; i += p)
prime[i] = false;
}
}
// count prime numbers
#pragma omp parallel for reduction(+:primes)
for (int p=2; p<=n; p++)
if (prime[p])
primes++;
return(primes);
}
/* Driver: sieve the primes up to 1e8, print the count and the elapsed
   wall time (measured with OpenMP's wall clock). */
int main()
{
    const int limit = 100000000;
    const double t0 = omp_get_wtime();
    printf("%d\n", sieveOfEratosthenes(limit));
    const double t1 = omp_get_wtime();
    printf("Total time: %f seconds\n", t1 - t0);
    return 0;
}
|
kvstore_dist_server.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright 2019 ByteDance Inc. or its affiliates. All Rights Reserved.
* Copyright (c) 2015 by Contributors
* \file mxnet_node.h
* \brief implement mxnet nodes
*/
#ifndef MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#define MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#include <mxnet/c_api.h>
#include <mxnet/kvstore.h>
#include <ps/ps.h>
#include <queue>
#include <string>
#include <mutex>
#include <condition_variable>
#include <memory>
#include <functional>
#include <future>
#include <vector>
#include <fstream>
#include <chrono>
#include <atomic>
#include "../profiler/profiler.h"
#include "../operator/tensor/elemwise_binary_op-inl.h"
#include "../operator/tensor/init_op.h"
#include "cpu_reducer.h"
namespace mxnet {
namespace kvstore {
// maintain same order in frontend.
// Control-plane commands delivered through the SimpleApp handle.
// Numeric values must stay in sync with the frontend's enum (see above).
enum class CommandType {
kController, kSetMultiPrecision, kStopServer, kSyncMode,
kSetGradientCompression, kSetProfilerParams
};
// Data-plane request flavors; encoded together with the dtype into the
// ps-lite command field via Cantor pairing (see GetCommandType below).
enum class RequestType {
kDefaultPushPull, kRowSparsePushPull, kCompressedPushPull
};
// Decoded form of a data request: which push/pull flavor it is and the
// mshadow dtype of the values it carries.
struct DataHandleType {
RequestType requestType;
int dtype;
};
/*!
* Uses Cantor pairing function to generate a unique number given two numbers.
* This number can also be inverted to find the unique pair whose Cantor value is this number.
* Ref: https://en.wikipedia.org/wiki/Pairing_function#Cantor_pairing_function
* \param requestType RequestType
* \param dtype integer
* \return Cantor value of arguments
*/
/*!
 * Encode (request type, dtype) into one int with the Cantor pairing
 * function; unique per pair and invertible via DepairDataHandleType.
 * \param requestType RequestType
 * \param d integer dtype
 * \return Cantor value of the two arguments
 */
static int GetCommandType(RequestType requestType, int d) {
  const int a = static_cast<int>(requestType);
  const int diagonal = a + d;
  return (diagonal * (diagonal + 1)) / 2 + d;
}
/*!
* Unpairs Cantor value and finds the two integers used to pair.
* Then returns DataHandleType object with those numbers.
* \param cmd DataHandleCommand generated by GetCommandType function
* \return DataHandleType
*/
static DataHandleType DepairDataHandleType(int cmd) {
// Invert the Cantor pairing: w is the index of the diagonal containing
// cmd, t is the Cantor value at the start of that diagonal.
int w = std::floor((std::sqrt(8 * cmd + 1) - 1)/2);
int t = ((w * w) + w) / 2;
int y = cmd - t;
int x = w - y;
// Negative components mean cmd was not produced by GetCommandType.
CHECK_GE(x, 0);
CHECK_GE(y, 0);
DataHandleType type;
type.requestType = static_cast<RequestType>(x);
type.dtype = y;
return type;
}
/**
* \brief executor runs a function using the thread called \ref Start
*/
class Executor {
public:
/**
* \brief Run the executor loop on the calling thread. Dequeues and runs
* posted functions one at a time; returns when the stop sentinel
* (an empty Func, posted by Stop()) is dequeued.
*/
void Start() {
std::unique_lock<std::mutex> lk(mu_);
while (true) {
// sleep until Exec() queues a block
cond_.wait(lk, [this]{return !queue_.empty();});
Block blk = std::move(queue_.front());
queue_.pop();
// run the function outside the lock so other Exec() callers can enqueue
lk.unlock();
if (blk.f) {
blk.f();
blk.p->set_value(); // wake the Exec() caller waiting on the future
} else {
// empty Func == stop sentinel: ack it, then leave the loop
blk.p->set_value(); break;
}
lk.lock();
}
}
/**
* \brief type of the functions this executor runs
*/
typedef std::function<void()> Func;
/**
* \brief Post \a func to be run by the thread inside Start() and block
* until it has finished. Thread-safe.
*/
void Exec(const Func& func) {
Block blk(func);
auto fut = blk.p->get_future();
{
std::lock_guard<std::mutex> lk(mu_);
queue_.push(std::move(blk));
cond_.notify_one();
}
// wait for the executor thread to run the block and fulfill the promise
fut.wait();
}
/**
* \brief Stop the Start() loop by posting the empty-Func sentinel.
* Blocks until the loop has acknowledged it. Thread-safe.
*/
void Stop() {
Exec(Func());
}
private:
// one queued unit of work: the function plus the promise used to
// signal its completion back to the Exec() caller
struct Block {
explicit Block(const Func& func) : f(func), p(std::make_shared<std::promise<void>>()) { }
Func f;
std::shared_ptr<std::promise<void>> p;
};
std::queue<Block> queue_;
std::mutex mu_;
std::condition_variable cond_;
};
class KVStoreDistServer {
public:
/**
 * \brief Construct the server: create the ps-lite KV server, read
 * env-var toggles, and install the command/data request handlers.
 *
 * Env vars read: ENABLE_PULL_ZERO_COPY (default true), PS_KEY_LOG,
 * MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE, PS_ENABLE_GRADIENT_WAIT,
 * BYTEPS_ENABLE_ASYNC (when set, sync_mode_ is turned off).
 */
KVStoreDistServer() {
  using namespace std::placeholders;
  ps_server_ = new ps::KVServer<char>(0);
  // Zero-copy pull: respond with a view of the stored buffer instead of
  // copying it into the response (see DefaultStorageResponse).
  enable_pull_zero_copy_ = dmlc::GetEnv("ENABLE_PULL_ZERO_COPY", true);
  if (enable_pull_zero_copy_) {
    LOG(INFO) << "Enable zero copy of pull operations.";
  }
  log_key_info_ = dmlc::GetEnv("PS_KEY_LOG", false);
  if (log_key_info_) {
    LOG(INFO) << "Log key information at PS";
  }
  // control-plane (SimpleApp) handler ...
  static_cast<ps::SimpleApp*>(ps_server_)->set_request_handle(
      std::bind(&KVStoreDistServer::CommandHandle, this, _1, _2));
  // ... and data-plane (push/pull) handler
  ps_server_->set_request_handle(
      std::bind(&KVStoreDistServer::DataHandleEx, this, _1, _2, _3));
  gradient_compression_ = std::make_shared<GradientCompression>();
  log_verbose_ = dmlc::GetEnv("MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE", false);
  update_buf_wait_ = dmlc::GetEnv("PS_ENABLE_GRADIENT_WAIT", false);
  sync_mode_ = !dmlc::GetEnv("BYTEPS_ENABLE_ASYNC", false);
  if (!sync_mode_) {
    LOG(INFO) << "BytePS server is enabled asynchronous training";
  }
  // NOTE(review): removed the leftover "Add this line to verify it is
  // working" debug LOG that had been committed into the constructor.
}
~KVStoreDistServer() {
// presumably ProfilerState(0) means "not running" — confirm against
// profiler.h; intent is to stop profiling before tearing down
profiler::Profiler::Get()->SetState(profiler::Profiler::ProfilerState(0));
delete ps_server_;
}
// Install the callback invoked for CommandType::kController messages.
void set_controller(const KVStore::Controller& controller) {
CHECK(controller);
controller_ = controller;
}
// Install the updater applied to stored values on asynchronous pushes.
void set_updater(const KVStore::Updater& updater) {
CHECK(updater);
updater_ = updater;
}
/**
* \brief Run the executor loop on the calling thread; blocks until a
* kStopServer command posts the stop sentinel (see CommandHandle).
*/
void Run() {
exec_.Start();
}
private:
// Per-key aggregation state for pushes.
struct UpdateBuf {
// metas of the pushes received so far; cleared once the update is applied
std::vector<ps::KVMeta> request;
// aggregated gradient across workers (used in sync mode)
NDArray merged;
// temp_array is used to cast received values as float32 for computation if required
NDArray temp_array;
};
// Handle a control-plane message: dispatch on CommandType encoded in
// recved.head, then always acknowledge the sender.
void CommandHandle(const ps::SimpleData& recved, ps::SimpleApp* app) {
CommandType recved_type = static_cast<CommandType>(recved.head);
switch (recved_type) {
case CommandType::kStopServer:
// posts the stop sentinel; the thread blocked in Run() then returns
exec_.Stop();
break;
case CommandType::kSyncMode:
CHECK(0) << "kSyncMode is not available now";
break;
case CommandType::kSetGradientCompression:
gradient_compression_->DecodeParams(recved.body);
break;
case CommandType::kSetProfilerParams:
// last char is the type of profiler command
ProcessServerProfilerCommands(static_cast<KVStoreServerProfilerCommand>
(recved.body.back() - '0'),
recved.body);
break;
case CommandType::kSetMultiPrecision:
// uses value 1 for message id from frontend
CHECK(0) << "kSetMultiPrecision is not available now";
break;
case CommandType::kController:
// this uses value 0 for message id from frontend
// let the main thread to execute ctrl, which is necessary for python
exec_.Exec([this, recved]() {
CHECK(controller_);
controller_(recved.head, recved.body);
});
break;
}
app->Response(recved);
}
/*
* For keys already initialized, if necessary create stored_realt.
* This will only be used if by some wrong usage of kvstore,
* some keys are initialized before optimizer is set.
*/
// For every already-initialized non-fp32 key, allocate a float32 master
// copy in store_realt_, re-type any existing merge buffer to float32,
// and copy the current values over. Waits for all copies at the end.
void CreateMultiPrecisionCopies() {
for (auto const &stored_entry : store_) {
// NOTE(review): store_ is keyed by uint64_t but this narrows to int —
// confirm keys stay within int range
const int key = stored_entry.first;
const NDArray &stored = stored_entry.second;
if (stored.dtype() != mshadow::kFloat32) {
auto &stored_realt = store_realt_[key];
if (stored.storage_type() == kRowSparseStorage) {
stored_realt = NDArray(kRowSparseStorage, stored.shape(), stored.ctx(),
true, mshadow::kFloat32);
} else {
stored_realt = NDArray(stored.shape(), stored.ctx(), false, mshadow::kFloat32);
}
auto &update = update_buf_[key];
if (!update.merged.is_none()) {
// re-type the merge buffer so future aggregation happens in fp32
if (update.merged.storage_type() == kRowSparseStorage) {
update.merged = NDArray(kRowSparseStorage, update.merged.shape(), update.merged.ctx(),
true, mshadow::kFloat32);
} else {
update.merged = NDArray(update.merged.shape(), update.merged.ctx(), false,
mshadow::kFloat32);
}
}
// switching precision mid-aggregation would lose buffered pushes
CHECK(update.request.size() == 0)
<< ps::MyRank() << "Multiprecision mode can not be set while pushes are underway."
<< "Please set optimizer before pushing keys." << key << " " << update.request.size();
CopyFromTo(stored, stored_realt);
}
}
// block until every async CopyFromTo above has completed
for (auto const &stored_realt_entry : store_realt_) {
stored_realt_entry.second.WaitToRead();
}
}
// Dispatch a profiler sub-command. For kSetConfig the trailing command
// char is stripped from the body; for the others the leading char is
// the integer argument ('0'/'1') passed to the MX profiler C API.
void ProcessServerProfilerCommands(KVStoreServerProfilerCommand type, const std::string& body) {
switch (type) {
case KVStoreServerProfilerCommand::kSetConfig:
SetProfilerConfig(body.substr(0, body.size() - 1));
break;
case KVStoreServerProfilerCommand::kState:
MXSetProfilerState(static_cast<int>(body.front() - '0'));
break;
case KVStoreServerProfilerCommand::kPause:
MXProfilePause(static_cast<int>(body.front() - '0'));
break;
case KVStoreServerProfilerCommand::kDump:
MXDumpProfile(static_cast<int>(body.front() - '0'));
break;
}
}
/**
 * \brief Parse comma-separated "key:value" profiler parameters sent by
 * the worker and forward them to MXSetProfilerConfig. The "filename"
 * value is prefixed with this server's rank so per-rank profile dumps
 * do not collide.
 *
 * The strings are owned by std::vector<std::string>; the const char*
 * arrays only borrow c_str() pointers. This replaces the original
 * manual new[]/delete[], which leaked if a CHECK aborted mid-loop.
 */
void SetProfilerConfig(std::string params_str) {
  std::vector<std::string> elems;
  mxnet::kvstore::split(params_str, ',', std::back_inserter(elems));
  std::vector<std::string> keys;
  std::vector<std::string> vals;
  keys.reserve(elems.size());
  vals.reserve(elems.size());
  for (size_t i = 0; i < elems.size(); i++) {
    std::vector<std::string> parts;
    mxnet::kvstore::split(elems[i], ':', std::back_inserter(parts));
    CHECK_EQ(parts.size(), 2) << "Improper profiler config passed from worker";
    CHECK(!parts[0].empty()) << "ProfilerConfig parameter is empty";
    CHECK(!parts[1].empty()) << "ProfilerConfig value is empty for parameter "<< parts[0];
    if (parts[0] == "filename") {
      parts[1] = "rank" + std::to_string(ps::MyRank()) + "_" + parts[1];
    }
    keys.push_back(parts[0]);
    vals.push_back(parts[1]);
  }
  // build the C-style views only after all strings are in their final
  // homes (no further vector growth => c_str() pointers stay valid)
  std::vector<const char*> ckeys;
  std::vector<const char*> cvals;
  ckeys.reserve(keys.size());
  cvals.reserve(vals.size());
  for (size_t i = 0; i < keys.size(); i++) {
    ckeys.push_back(keys[i].c_str());
    cvals.push_back(vals[i].c_str());
  }
  MXSetProfilerConfig(elems.size(), &ckeys[0], &cvals[0]);
}
// Entry point for all data-plane requests: decode the Cantor-paired
// command into (request type, dtype) and dispatch to the matching
// handler.
void DataHandleEx(const ps::KVMeta& req_meta,
const ps::KVPairs<char>& req_data,
ps::KVServer<char>* server) {
DataHandleType type = DepairDataHandleType(req_meta.cmd);
switch (type.requestType) {
case RequestType::kRowSparsePushPull:
DataHandleRowSparse(type, req_meta, req_data, server);
break;
case RequestType::kCompressedPushPull:
DataHandleCompressed(type, req_meta, req_data, server);
break;
case RequestType::kDefaultPushPull:
DataHandleDefault(type, req_meta, req_data, server);
break;
}
}
// True when multi-precision mode maintains a float32 master copy
// (store_realt_) for this request's non-float32 dtype.
inline bool has_multi_precision_copy(const DataHandleType type) {
return multi_precision_ && type.dtype != mshadow::kFloat32;
}
// Commit the aggregated update for \a key to the store. In sync mode
// this fires only once all workers' pushes have arrived; in async mode
// it fires on every call. Clears the buffered request metas afterwards.
inline void ApplyUpdates(const DataHandleType type, const uint64_t key,
UpdateBuf *update_buf, ps::KVServer<char>* server) {
if (!sync_mode_ || update_buf->request.size() == (size_t) ps::NumWorkers()) {
auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key];
// NOTE(review): `update` is never used below — dead local
auto& update = sync_mode_ ? update_buf->merged : update_buf->temp_array;
// NOTE: not sure whether we need this WaitToRead, default is disabled
if (update_buf_wait_) update_buf->merged.WaitToRead();
// async mode does not need this Copy
if (sync_mode_) CopyFromTo(update_buf->merged, &stored);
// keep the dtype-typed copy in store_ in sync with the fp32 master
if (has_multi_precision_copy(type)) CopyFromTo(stored, store_[key]);
update_buf->request.clear();
}
}
/**
 * \brief Translate the row-sparse keys of a request into row indices
 * relative to \a master_key.
 *
 * keys[0] is the master key itself; keys[1..num_rows] each encode
 * master_key + row_id, so entry i fills indices[i - 1].
 *
 * The original's unconditional `indices[0] = 0;` was a dead store (the
 * first loop iteration overwrites it) and an out-of-bounds write when
 * num_rows == 0 and indices points at an empty buffer; it is removed.
 */
void DecodeRowIds(const ps::SArray<ps::Key> &keys, int64_t *indices,
                  const int64_t master_key, const int64_t num_rows) {
  for (int64_t i = 1; i <= num_rows; i++) {
    const uint64_t key = DecodeKey(keys[i]);
    indices[i - 1] = key - master_key;
  }
}
// Add a newly received row-sparse gradient into updateBuf->merged.
// The element-wise sum runs asynchronously on the engine into a fresh
// row-sparse array, which is then copied back into merged.
void AccumulateRowSparseGrads(const DataHandleType type,
const NDArray& recved,
UpdateBuf* updateBuf) {
NDArray out(kRowSparseStorage, updateBuf->merged.shape(), Context(), true,
has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
// in multi-precision mode, first cast the received values to fp32
if (has_multi_precision_copy(type)) CopyFromTo(recved, updateBuf->temp_array);
const NDArray& to_merge = has_multi_precision_copy(type) ? updateBuf->temp_array : recved;
// accumulate row_sparse gradients
using namespace mshadow;
Engine::Get()->PushAsync(
[to_merge, updateBuf, out](RunContext ctx, Engine::CallbackOnComplete on_complete) {
op::ElemwiseBinaryOp::ComputeEx<cpu, op::mshadow_op::plus>(
{}, {}, {to_merge, updateBuf->merged}, {kWriteTo}, {out});
on_complete();
}, to_merge.ctx(), {to_merge.var(), updateBuf->merged.var()}, {out.var()},
FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
CopyFromTo(out, &(updateBuf->merged), 0);
// block until merged holds the sum: `recved` aliases request memory
// that may be freed once the caller returns
updateBuf->merged.WaitToRead();
}
// Answer a row-sparse pull: gather the requested rows of the stored
// array into one contiguous response buffer. keys[0] is the master key
// (its length is reported as 0); keys[1..num_rows] select the rows.
void RowSparsePullResponse(const DataHandleType type,
const int master_key,
const size_t num_rows,
const ps::KVMeta& req_meta,
const ps::KVPairs<char>& req_data,
ps::KVServer<char>* server) {
if (log_verbose_) LOG(INFO) << "pull: " << master_key;
ps::KVPairs<char> response;
if (num_rows == 0) {
// no rows requested: reply with all-zero lengths
std::vector<int> lens(req_data.keys.size(), 0);
response.keys = req_data.keys;
response.lens.CopyFrom(lens.begin(), lens.end());
server->Response(req_meta, response);
return;
}
const NDArray& stored = store_[master_key];
if (has_multi_precision_copy(type)) stored.WaitToRead();
CHECK(!stored.is_none()) << "init " << master_key << " first";
auto shape = stored.shape();
// elements per row = product of all non-leading dims
auto unit_len = shape.ProdShape(1, shape.ndim());
const int num_bytes = mshadow::mshadow_sizeof(type.dtype);
const int unit_size = unit_len * num_bytes;
const char* data = static_cast<char *> (stored.data().dptr_);
auto len = num_rows * unit_size;
// concat values
response.vals.resize(len);
// rows are independent, so the gather parallelizes cleanly
#pragma omp parallel for
for (size_t i = 1; i <= num_rows; i++) {
uint64_t key = DecodeKey(req_data.keys[i]);
int64_t row_id = key - master_key;
const auto src = data + row_id * unit_size;
auto begin = (i - 1) * unit_size;
auto end = i * unit_size;
response.vals.segment(begin, end).CopyFrom(src, unit_size);
}
// setup response
response.keys = req_data.keys;
std::vector<int> lens(req_data.keys.size(), unit_len);
lens[0] = 0;
response.lens.CopyFrom(lens.begin(), lens.end());
server->Response(req_meta, response);
}
// First push for a row-sparse key: allocate the stored array (fp32
// master copy in multi-precision mode, plus a dtype-typed copy in
// store_), populate a full row-index set, and copy the received data
// in. Responds only after the stored value is readable, because the
// request's value buffer may be freed once this handler returns.
void InitRowSparseStored(const DataHandleType type,
const int master_key,
const size_t num_rows,
const ps::KVMeta& req_meta,
const ps::KVPairs<char>& req_data,
ps::KVServer<char>* server) {
auto& stored = has_multi_precision_copy(type) ? store_realt_[master_key] : store_[master_key];
int dtype = type.dtype;
int num_bytes = mshadow::mshadow_sizeof(dtype);
auto unit_len = req_data.lens[1] / num_bytes;
CHECK_GT(unit_len, 0);
size_t ds[] = {num_rows, (size_t) unit_len};
TShape dshape(ds, ds + 2);
CHECK_EQ(req_data.vals.size(), num_rows * unit_len * num_bytes);
// wrap the request's value buffer without copying
TBlob recv_blob;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask);
})
NDArray recved = NDArray(recv_blob, 0);
stored = NDArray(kRowSparseStorage, dshape, Context(), true,
has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
if (has_multi_precision_copy(type)) {
store_[master_key] = NDArray(kRowSparseStorage, dshape, Context(), true, type.dtype);
}
Engine::Get()->PushAsync(
[this, recved, stored, type](RunContext ctx, Engine::CallbackOnComplete on_complete) {
NDArray rsp = stored;
stored.CheckAndAlloc({mshadow::Shape1(recved.shape()[0])});
mshadow::Stream<cpu> *s = ctx.get_stream<cpu>();
using namespace mxnet::op;
nnvm::dim_t nnr = rsp.shape()[0];
// initialization marks every row present: idx = [0, 1, ..., nnr-1]
MSHADOW_IDX_TYPE_SWITCH(rsp.aux_type(rowsparse::kIdx), IType, {
IType* idx = rsp.aux_data(rowsparse::kIdx).dptr<IType>();
mxnet_op::Kernel<PopulateFullIdxRspKernel, cpu>::Launch(s, nnr, idx);
});
TBlob rsp_data = rsp.data();
// copies or casts as appropriate
ndarray::Copy<cpu, cpu>(recved.data(), &rsp_data, Context(), Context(), RunContext());
on_complete();
}, recved.ctx(), {recved.var()}, {stored.var()},
FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
if (has_multi_precision_copy(type)) {
CopyFromTo(stored, store_[master_key]);
store_[master_key].WaitToRead();
}
stored.WaitToRead();
server->Response(req_meta);
}
// Handle a row-sparse push or pull. keys[0] encodes the master key;
// the remaining keys encode the rows being pushed/pulled.
void DataHandleRowSparse(const DataHandleType type, const ps::KVMeta& req_meta,
const ps::KVPairs<char>& req_data,
ps::KVServer<char>* server) {
uint64_t master_key = DecodeKey(req_data.keys[0]);
auto num_rows = req_data.keys.size() - 1;
auto& stored = store_[master_key];
if (req_meta.push) {
CHECK_GT(req_data.lens.size(), 0) << "req_data.lens cannot be empty";
// the master-key entry itself carries no data
CHECK_EQ(req_data.lens[0], 0);
if (stored.is_none()) {
if (log_verbose_) LOG(INFO) << "initial push: " << master_key;
// initialization
CHECK_GT(num_rows, 0) << "init with empty data is not supported";
InitRowSparseStored(type, master_key, num_rows, req_meta, req_data, server);
return;
} else {
if (log_verbose_) LOG(INFO) << "push: " << master_key << " " << req_data.keys;
auto& updates = update_buf_[master_key];
// lazily allocate the aggregation buffers on the first real push
if (sync_mode_ && updates.merged.is_none()) {
updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(), true,
has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
}
if (has_multi_precision_copy(type) && updates.temp_array.is_none()) {
updates.temp_array = NDArray(kRowSparseStorage, stored.shape(), Context(), false,
mshadow::kFloat32);
}
if (num_rows == 0) {
// a push with no rows still counts toward the worker tally
if (sync_mode_) {
if (updates.request.empty()) {
// reset to zeros
int merged_dtype = has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype;
updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(),
true, merged_dtype);
} // else nothing to aggregate
updates.request.push_back(req_meta);
ApplyUpdates(type, master_key, &updates, server);
} else {
server->Response(req_meta);
}
} else {
auto unit_len = req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype);
CHECK_GT(unit_len, 0);
// indices
std::vector<int64_t> indices(num_rows);
DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows);
// data
TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask);
size_t ds[] = {(size_t) num_rows, (size_t) unit_len};
TShape dshape(ds, ds + 2);
TBlob recv_blob;
MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, {
recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()),
dshape, cpu::kDevMask);
})
// row_sparse NDArray wrapping the request buffer (no copy yet)
NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0);
if (updates.request.empty()) {
// first worker for this round: overwrite rather than accumulate
if (sync_mode_) {
CopyFromTo(recved, updates.merged);
} else {
if (has_multi_precision_copy(type)) {
CopyFromTo(recved, updates.temp_array);
} else {
updates.temp_array = recved;
}
}
} else {
// later workers: add into the running sum
CHECK(sync_mode_);
AccumulateRowSparseGrads(type, recved, &updates);
}
updates.request.push_back(req_meta);
ApplyUpdates(type, master_key, &updates, server);
}
}
} else {
// pull
RowSparsePullResponse(type, master_key, num_rows, req_meta, req_data, server);
}
}
// Cached pull responses per key, so the same buffer (and thus the same
// RDMA memory registration) is reused across pulls.
// NOTE(review): keyed by int while keys elsewhere are uint64_t —
// confirm keys never exceed int range, else they truncate here.
std::unordered_map<int, ps::KVPairs<char> > server_response_map;
// Answer a dense pull for \a key, either zero-copy (response points at
// the stored buffer) or by copying, and cache the response per key.
void DefaultStorageResponse(const DataHandleType type,
const int key,
const ps::KVMeta& req_meta,
const ps::KVPairs<char> &req_data,
ps::KVServer<char>* server) {
const NDArray& stored = store_[key];
CHECK(!stored.is_none()) << "init " << key << " first";
// as server returns when store_realt is ready in this case
if (has_multi_precision_copy(type)) stored.WaitToRead();
auto len = stored.shape().Size() * mshadow::mshadow_sizeof(stored.dtype());
// send pull response
auto iterator = server_response_map.find(key);
if (iterator==server_response_map.end()) { // new key
ps::KVPairs<char> response;
response.keys = req_data.keys;
response.lens = {len};
stored.WaitToRead();
if(enable_pull_zero_copy_) {
response.vals = ps::SArray<char>(static_cast<char*>(stored.data().dptr_), len, false); // enable zero copy
}
else {
response.vals.CopyFrom(static_cast<const char*>(stored.data().dptr_), len);
}
server_response_map[key] = response; // add to the map
server->Response(req_meta, response);
}
else { // not new key, then reuse the memory address to avoid ibv_reg_mr on RDMA data path
ps::KVPairs<char> *response = &iterator->second;
// keys and lens remain unchanged, just update vals
auto p = static_cast<char*>(stored.data().dptr_);
if(enable_pull_zero_copy_) {
// run on the engine with a read dependency on `stored` so the
// response is not sent while an update is still writing it
Engine::Get()->PushAsync(
[this, server, req_meta, response, p, len](RunContext ctx, Engine::CallbackOnComplete on_complete) {
// enable zero copy
CHECK(p);
response->vals = ps::SArray<char>(p, len, false);
server->Response(req_meta, *response);
on_complete();
}, stored.ctx(), {stored.var()}, {},
FnProperty::kNormal, 0, "BYTEPS_SEND_PULL_RESPONSE"); // should avoid racing
}
else {
Engine::Get()->PushAsync(
[this, server, req_meta, response, p, len](RunContext ctx, Engine::CallbackOnComplete on_complete) {
response->vals.CopyFrom(static_cast<const char*>(p), len);
server->Response(req_meta, *response);
on_complete();
}, stored.ctx(), {stored.var()}, {},
FnProperty::kNormal, 0, "BYTEPS_SEND_PULL_RESPONSE"); // should avoid racing
}
}
}
// Handle a gradient-compressed push (fp32 only). keys[0] is a dummy key
// carrying the original (uncompressed) array size; keys[1] is the real
// key. The received values are dequantized before being stored/merged.
void DataHandleCompressed(const DataHandleType type,
const ps::KVMeta& req_meta,
const ps::KVPairs<char> &req_data,
ps::KVServer<char>* server) {
CHECK_EQ(type.dtype, mshadow::kFloat32)
<< "Gradient compression is currently supported for fp32 only";
if (req_meta.push) {
// there used several WaitToRead, this is because \a recved's memory
// could be deallocated when this function returns. so we need to make sure
// the operators with \a NDArray are actually finished
// first for dummy key which represents original size of array, whose len is 0
CHECK_EQ(req_data.keys.size(), (size_t)2);
CHECK_EQ(req_data.lens.size(), (size_t)2);
CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[1]);
uint64_t original_size = DecodeKey(req_data.keys[0]);
uint64_t key = DecodeKey(req_data.keys[1]);
auto& stored = store_[key];
size_t ds[] = {(size_t)req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype)};
TShape dshape(ds, ds + 1);
// wrap the compressed payload without copying
TBlob recv_blob(reinterpret_cast<real_t*>(req_data.vals.data()), dshape, cpu::kDevMask);
NDArray recved = NDArray(recv_blob, 0);
// NOTE(review): this copies the handle out of the map; when is_none
// the fresh buffer below is never written back to decomp_buf_, so it
// appears to be re-allocated on every push — confirm intent
NDArray decomp_buf = decomp_buf_[key];
dshape = TShape{(int64_t) original_size};
if (decomp_buf.is_none()) {
decomp_buf = NDArray(dshape, Context());
}
if (stored.is_none()) {
// first push: dequantize straight into the freshly allocated store
stored = NDArray(dshape, Context());
gradient_compression_->Dequantize(recved, &stored, 0);
server->Response(req_meta);
stored.WaitToRead();
} else if (sync_mode_) {
// synced push
auto& merged = update_buf_[key];
if (merged.merged.is_none()) {
merged.merged = NDArray(dshape, Context());
}
if (merged.request.size() == 0) {
// first worker of the round overwrites the merge buffer
gradient_compression_->Dequantize(recved, &merged.merged, 0);
} else {
// later workers accumulate via the decompression scratch buffer
gradient_compression_->Dequantize(recved, &decomp_buf, 0);
merged.merged += decomp_buf;
}
merged.request.push_back(req_meta);
ApplyUpdates(type, key, &merged, server);
} else {
// async push
gradient_compression_->Dequantize(recved, &decomp_buf, 0);
// Exec blocks until the updater has run, so the by-reference
// captures of the locals stay valid
exec_.Exec([this, key, &decomp_buf, &stored]() {
CHECK(updater_);
updater_(key, decomp_buf, &stored);
});
server->Response(req_meta);
stored.WaitToRead();
}
} else { // pull
CHECK_EQ(req_data.keys.size(), (size_t)1);
CHECK_EQ(req_data.lens.size(), (size_t)0);
// temporarily comment this two lines, should revisit here if we use compression
// uint64_t key = DecodeKey(req_data.keys[0]);
// DefaultStorageResponse(type, key, req_meta, req_data, server);
}
}
/*
 * Acknowledge a push request with a response that echoes the key.
 * Responses are cached per key so later pushes reuse the same buffer
 * (avoids re-running ibv_reg_mr on the RDMA data path).
 */
void SendPushResponse(uint64_t key, const ps::KVMeta& req, ps::KVServer<char>* server){
  auto it = push_response_map.find(key);
  if (it != push_response_map.end()) {
    // cached: refresh the key in place and reuse the stored buffer
    ps::KVPairs<char>& cached = it->second;
    cached.keys[0] = key;
    server->Response(req, cached);
    return;
  }
  // first push for this key: build the response, cache it, then send
  ps::KVPairs<char> response;
  response.keys.push_back(key);
  push_response_map[key] = response;
  server->Response(req, response);
}
// Handle a dense push or pull for a single key. Pushes are aggregated
// in update_buf_ until all workers have reported (sync mode) or applied
// immediately (async mode); pulls are answered from the store.
void DataHandleDefault(const DataHandleType type, const ps::KVMeta& req_meta,
const ps::KVPairs<char> &req_data,
ps::KVServer<char>* server) {
// serialize this handler across ps-lite threads
std::lock_guard<std::mutex> lock(engine_mu_);
// do some check
CHECK_EQ(req_data.keys.size(), (size_t)1);
if (req_meta.push) {
CHECK_EQ(req_data.lens.size(), (size_t)1);
CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]);
if (log_key_info_) LOG(INFO) << "push key=" << DecodeKey(req_data.keys[0])
<< "\t sender=" << req_meta.sender
<< "\t size=" << (size_t) req_data.lens[0];
} else {
if (log_key_info_) LOG(INFO) << "pull key=" << (uint64_t) DecodeKey(req_data.keys[0])
<< "\t sender=" << req_meta.sender;
}
uint64_t key = DecodeKey(req_data.keys[0]);
// there used several WaitToRead, this is because \a recved's memory
// could be deallocated when this function returns. so we need to make sure
// the operators with \a NDArray are actually finished
if (req_meta.push) {
auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key];
size_t ds[] = {(size_t) req_data.lens[0] / mshadow::mshadow_sizeof(type.dtype)};
TShape dshape(ds, ds + 1);
// wrap the request's value buffer without copying
TBlob recv_blob;
MSHADOW_TYPE_SWITCH(type.dtype, DType, {
recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask);
})
NDArray recved = NDArray(recv_blob, 0);
if (stored.is_none()) {
// buffer the request meta
auto &updates = update_buf_[key];
updates.request.push_back(req_meta);
// wait until every worker has sent its initial push for this key
if (updates.request.size() < (size_t) ps::NumWorkers()) return;
if (log_key_info_) {
LOG(INFO) << "Collected all " << updates.request.size()
<< " requests for key=" << key
<< ", init the store buffer size=" << (size_t) req_data.lens[0];
}
// initialization
stored = NDArray(dshape, Context(), false,
has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
CopyFromTo(recved, &stored, 0);
if (has_multi_precision_copy(type)) {
// also materialize the dtype-typed copy served to pulls
auto& stored_dtype = store_[key];
stored_dtype = NDArray(dshape, Context(), false, type.dtype);
CopyFromTo(stored, stored_dtype);
stored_dtype.WaitToRead();
}
// delay sending response until stored is ready
stored.WaitToRead();
for (const auto& req : updates.request) {
SendPushResponse(key, req, server);
}
updates.request.clear();
} else {
auto &updates = update_buf_[key];
// lazily allocate aggregation buffers on the first post-init push
if (sync_mode_ && updates.merged.is_none()) {
updates.merged = NDArray(dshape, Context(), false,
has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
}
if (has_multi_precision_copy(type) && updates.temp_array.is_none()) {
updates.temp_array = NDArray(dshape, Context(), false, mshadow::kFloat32);
}
if (updates.request.empty()) { // from the first incoming worker
if (sync_mode_) {
CopyFromTo(recved, updates.merged);
} else {
if (has_multi_precision_copy(type)) {
CopyFromTo(recved, updates.temp_array);
} else {
stored += recved;
}
}
} else { // from other workers
CHECK(sync_mode_);
if (has_multi_precision_copy(type)) {
CopyFromTo(recved, updates.temp_array);
updates.merged += updates.temp_array;
} else {
// NOTE(review): `updates` (an UpdateBuf) is captured by value
// here; the copy's NDArrays still alias the same storage —
// confirm this aliasing is what makes the in-place sum land in
// update_buf_[key].merged
Engine::Get()->PushAsync(
[this, updates, recved](RunContext ctx, Engine::CallbackOnComplete on_complete) {
CHECK_GE(bps_reducer_.sum(bps_reducer_.GetData(&updates.merged), bps_reducer_.GetData(&recved),
bps_reducer_.GetSize(&recved), bps_reducer_.GetDType(&recved)), 0);
on_complete();
}, updates.merged.ctx(), {recved.var()}, {updates.merged.var()},
FnProperty::kCPUPrioritized, 0, "BYTEPS_SUMMATION");
}
}
// add a worker information (request.size() is the # workers received)
updates.request.push_back(req_meta);
SendPushResponse(key, req_meta, server);
ApplyUpdates(type, key, &updates, server);
}
} else {
auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key];
if (stored.is_none()) {
CHECK(0) << "Processing pull request when the NDArray of key " << key << " has not been inited yet, which is not expected.";
}
// process pull request
// NOTE(review): `updates` is unused here, and operator[] inserts an
// empty UpdateBuf for the key as a side effect — confirm intent
auto &updates = update_buf_[key];
DefaultStorageResponse(type, key, req_meta, req_data, server);
}
}
/*
 * Map a ps-lite key into this server's local key space by subtracting
 * the start of the key range this rank owns.
 */
uint64_t DecodeKey(ps::Key key) {
  const auto& ranges = ps::Postoffice::Get()->GetServerKeyRanges();
  return key - ranges[ps::MyRank()].begin();
}
/**
* \brief user defined mode for push: true = synchronous aggregation
* across workers, false = asynchronous updates (BYTEPS_ENABLE_ASYNC)
*/
bool sync_mode_;
KVStore::Controller controller_;
KVStore::Updater updater_;
/**
* \brief store_ contains the value at kvstore for each key
*/
std::unordered_map<uint64_t, NDArray> store_;
// float32 master copies used in multi-precision mode
std::unordered_map<uint64_t, NDArray> store_realt_;
/**
* \brief update_buf_ is a buffer used if sync_mode is true. It represents
* values from different workers being merged. The store will be updated
* to this value when values from all workers are pushed into this buffer.
*/
std::unordered_map<uint64_t, UpdateBuf> update_buf_;
/**
* \brief decomp_buf_ is a buffer into which compressed values are
* decompressed before merging to the store. used when compress_!='none'
*/
std::unordered_map<uint64_t, NDArray> decomp_buf_;
Executor exec_;
ps::KVServer<char>* ps_server_;
// whether to LOG verbose information
bool log_verbose_;
// whether to LOG key trace
bool log_key_info_;
// whether to enable zero copy for pull request
bool enable_pull_zero_copy_;
/*
* \brief whether to use multi precision mode.
* in multi precision mode, all weights are stored as float32.
* any gradient received will be cast to float32 before accumulation and updating of weights.
*/
bool multi_precision_;
// if set (PS_ENABLE_GRADIENT_WAIT), ApplyUpdates waits on the merged
// buffer before committing it
bool update_buf_wait_;
// serializes DataHandleDefault across ps-lite handler threads
std::mutex engine_mu_;
// CPU summation helper used for sync-mode gradient aggregation
CpuReducer bps_reducer_;
/*
* send push response with the key as value
*/
std::unordered_map<uint64_t, ps::KVPairs<char> > push_response_map;
/**
* \brief gradient compression object.
* starts with none, used after SetGradientCompression sets the type
* currently there is no support for unsetting gradient compression
*/
std::shared_ptr<kvstore::GradientCompression> gradient_compression_;
};
} // namespace kvstore
} // namespace mxnet
#endif // MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
|
hello_thread.c | #include <stdio.h>
#include <omp.h>
/* OpenMP hello world: every thread prints its id; the master thread
   additionally reports the size of the team. */
int main(int argc, char *argv[]) {
  int nthreads, tid;
  /* Fork a team of threads; tid is private so each thread sees its own id */
  #pragma omp parallel private(tid)
  {
    tid = omp_get_thread_num();
    printf("Hello World from thread = %d\n", tid);
    if (tid == 0)
    {
      /* only the master queries and reports the team size */
      nthreads = omp_get_num_threads();
      printf("Number of threads = %d\n", nthreads);
    }
  } /* implicit barrier: all threads join the master here */
  return 0;
}
|
CompressNeighboursWorklet.h | //============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//
// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
// Copyright 2014 UT-Battelle, LLC.
// Copyright 2014 Los Alamos National Security.
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National
// Laboratory (LANL), the U.S. Government retains certain rights in
// this software.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_augmented_contourtree_mesh_inc_compress_neighbours_worklet_h
#define vtk_m_worklet_contourtree_augmented_contourtree_mesh_inc_compress_neighbours_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_augmented
{
namespace mesh_dem_contourtree_mesh_inc
{
// Worklet that scatters each valid arc of the contour tree into the packed
// neighbours array: arc "from -> to" contributes the two entries
// neighbours[2*arcTargetIndex[from] + {0,1}] = 2*from + {0,1}.
// Arcs flagged as NO_SUCH_ELEMENT are skipped.
class CompressNeighboursWorklet : public vtkm::worklet::WorkletMapField
{
public:
typedef void ControlSignature(FieldIn arcs, // (input) arcs
FieldIn arcTargetIndex, // (input) arcTargetIndex
WholeArrayOut neighbours); // (output) neighbours
// _1 = arcs value, InputIndex = "from" vertex id, _2 = arcTargetIndex value,
// _3 = whole-array neighbours portal
typedef void ExecutionSignature(_1, InputIndex, _2, _3);
typedef _1 InputDomain;
// Default Constructor
VTKM_EXEC_CONT
CompressNeighboursWorklet() {}
template <typename OutFieldPortalType>
VTKM_EXEC void operator()(vtkm::Id& to,
vtkm::Id from,
vtkm::Id& arcTargetIndexFrom,
const OutFieldPortalType& neighboursPortal) const
{
// Only arcs that actually exist are compressed into the output.
if (!NoSuchElement(to))
{
neighboursPortal.Set(2 * arcTargetIndexFrom + 0, 2 * from + 0);
neighboursPortal.Set(2 * arcTargetIndexFrom + 1, 2 * from + 1);
}
// In serial this worklet implements the following operation
// #pragma omp parallel for
// for (indexVector::size_type from = 0; from < arcs.size(); ++from)
// {
// indexType to = arcs[from];
// if (!NoSuchElement(to))
// {
// assert(MaskedIndex(to) != from);
// neighbours[2*arcTargetIndex[from]+0] = 2*from+0;
// neighbours[2*arcTargetIndex[from]+1] = 2*from+1;
// }
// }
}
}; // ComputeMaxNeighboursWorklet
} // namespace mesh_dem_contourtree_mesh_inc
} // namespace contourtree_augmented
} // namespace worklet
} // namespace vtkm
#endif
|
conv3x3s1_winograd64_transform_kernel_neon_pack.h | // in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
static void conv3x3s1_winograd64_transform_kernel_neon_pack(const Mat& kernel_tm, Mat& kernel_tm2, int inch, int outch)
{
// Repack the 8x8 winograd-transformed kernels into an interleaved layout for
// the NEON packed GEMM. kernel_tm holds one channel per output channel and
// one row per input channel; each row carries 8*8 = 64 floats, consumed here
// as 16 iterations of 4 floats. Output channels are processed in groups of
// 4 (interleaved); the tail (outch % 4) channels are copied sequentially
// into one extra channel of kernel_tm2.
// optimized layout for winograd4
// interleave weights
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
// one destination channel per group of 4 output channels, plus (at most)
// one extra channel that holds all remaining tail channels back to back
kernel_tm2.create(8*8 * inch * 4, 1, nn_outch + (outch % 4 + 3) / 4);
#pragma omp parallel for
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 4;
float* ktm2 = kernel_tm2.channel(pp);
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p+1);
const Mat kernel2_tm = kernel_tm.channel(p+2);
const Mat kernel3_tm = kernel_tm.channel(p+3);
int q=0;
#if __ARM_NEON && __aarch64__
// AArch64 only: consume 4 input channels at a time, interleaving 4 floats
// from each of the 4 output-channel kernels per iteration.
for (; q+3<inch; q+=4)
{
const float* k00 = kernel0_tm.row(q);
const float* k01 = kernel0_tm.row(q+1);
const float* k02 = kernel0_tm.row(q+2);
const float* k03 = kernel0_tm.row(q+3);
const float* k10 = kernel1_tm.row(q);
const float* k11 = kernel1_tm.row(q+1);
const float* k12 = kernel1_tm.row(q+2);
const float* k13 = kernel1_tm.row(q+3);
const float* k20 = kernel2_tm.row(q);
const float* k21 = kernel2_tm.row(q+1);
const float* k22 = kernel2_tm.row(q+2);
const float* k23 = kernel2_tm.row(q+3);
const float* k30 = kernel3_tm.row(q);
const float* k31 = kernel3_tm.row(q+1);
const float* k32 = kernel3_tm.row(q+2);
const float* k33 = kernel3_tm.row(q+3);
for (int r=0; r<16; r++)
{
// split into two asm blocks for gcc reject over 30 oprands :(
asm volatile(
"ld1 {v0.4s}, [%1], #16 \n"
"ld1 {v1.4s}, [%2], #16 \n"
"ld1 {v2.4s}, [%3], #16 \n"
"ld1 {v3.4s}, [%4], #16 \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"ld1 {v0.4s}, [%5], #16 \n"
"ld1 {v1.4s}, [%6], #16 \n"
"ld1 {v2.4s}, [%7], #16 \n"
"ld1 {v3.4s}, [%8], #16 \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
: "=r"(ktm2), // %0
"=r"(k00), // %1
"=r"(k01), // %2
"=r"(k02), // %3
"=r"(k03), // %4
"=r"(k10), // %5
"=r"(k11), // %6
"=r"(k12), // %7
"=r"(k13) // %8
: "0"(ktm2),
"1"(k00),
"2"(k01),
"3"(k02),
"4"(k03),
"5"(k10),
"6"(k11),
"7"(k12),
"8"(k13)
: "cc", "memory", "v0", "v1", "v2", "v3"
);
asm volatile(
"ld1 {v0.4s}, [%1], #16 \n"
"ld1 {v1.4s}, [%2], #16 \n"
"ld1 {v2.4s}, [%3], #16 \n"
"ld1 {v3.4s}, [%4], #16 \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"ld1 {v0.4s}, [%5], #16 \n"
"ld1 {v1.4s}, [%6], #16 \n"
"ld1 {v2.4s}, [%7], #16 \n"
"ld1 {v3.4s}, [%8], #16 \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
: "=r"(ktm2), // %0
"=r"(k20), // %1
"=r"(k21), // %2
"=r"(k22), // %3
"=r"(k23), // %4
"=r"(k30), // %5
"=r"(k31), // %6
"=r"(k32), // %7
"=r"(k33) // %8
: "0"(ktm2),
"1"(k20),
"2"(k21),
"3"(k22),
"4"(k23),
"5"(k30),
"6"(k31),
"7"(k32),
"8"(k33)
: "cc", "memory", "v0", "v1", "v2", "v3"
);
}
}
#endif // __ARM_NEON && __aarch64__
// Consume 2 input channels at a time (NEON fast path or scalar fallback).
for (; q+1<inch; q+=2)
{
const float* k00 = kernel0_tm.row(q);
const float* k01 = kernel0_tm.row(q+1);
const float* k10 = kernel1_tm.row(q);
const float* k11 = kernel1_tm.row(q+1);
const float* k20 = kernel2_tm.row(q);
const float* k21 = kernel2_tm.row(q+1);
const float* k30 = kernel3_tm.row(q);
const float* k31 = kernel3_tm.row(q+1);
for (int r=0; r<16; r++)
{
#if __ARM_NEON
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%1], #16 \n"
"ld1 {v1.4s}, [%2], #16 \n"
"st1 {v0.4s, v1.4s}, [%0], #32 \n"
"ld1 {v0.4s}, [%3], #16 \n"
"ld1 {v1.4s}, [%4], #16 \n"
"st1 {v0.4s, v1.4s}, [%0], #32 \n"
"ld1 {v0.4s}, [%5], #16 \n"
"ld1 {v1.4s}, [%6], #16 \n"
"st1 {v0.4s, v1.4s}, [%0], #32 \n"
"ld1 {v0.4s}, [%7], #16 \n"
"ld1 {v1.4s}, [%8], #16 \n"
"st1 {v0.4s, v1.4s}, [%0], #32 \n"
: "=r"(ktm2), // %0
"=r"(k00), // %1
"=r"(k01), // %2
"=r"(k10), // %3
"=r"(k11), // %4
"=r"(k20), // %5
"=r"(k21), // %6
"=r"(k30), // %7
"=r"(k31) // %8
: "0"(ktm2),
"1"(k00),
"2"(k01),
"3"(k10),
"4"(k11),
"5"(k20),
"6"(k21),
"7"(k30),
"8"(k31)
: "cc", "memory", "v0", "v1"
);
#else
asm volatile(
"vld1.f32 {d0-d1}, [%1 :128]! \n"
"vld1.f32 {d2-d3}, [%2 :128]! \n"
"vst1.f32 {d0-d3}, [%0 :128]! \n"
"vld1.f32 {d0-d1}, [%3 :128]! \n"
"vld1.f32 {d2-d3}, [%4 :128]! \n"
"vst1.f32 {d0-d3}, [%0 :128]! \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"vld1.f32 {d2-d3}, [%6 :128]! \n"
"vst1.f32 {d0-d3}, [%0 :128]! \n"
"vld1.f32 {d0-d1}, [%7 :128]! \n"
"vld1.f32 {d2-d3}, [%8 :128]! \n"
"vst1.f32 {d0-d3}, [%0 :128]! \n"
: "=r"(ktm2), // %0
"=r"(k00), // %1
"=r"(k01), // %2
"=r"(k10), // %3
"=r"(k11), // %4
"=r"(k20), // %5
"=r"(k21), // %6
"=r"(k30), // %7
"=r"(k31) // %8
: "0"(ktm2),
"1"(k00),
"2"(k01),
"3"(k10),
"4"(k11),
"5"(k20),
"6"(k21),
"7"(k30),
"8"(k31)
: "cc", "memory", "q0", "q1"
);
#endif // __aarch64__
#else
// Scalar fallback: same interleaved layout as the NEON path above.
for (int m=0; m<4; m++)
{
ktm2[0 +m] = k00[m];
ktm2[4 +m] = k01[m];
ktm2[8 +m] = k10[m];
ktm2[12+m] = k11[m];
ktm2[16+m] = k20[m];
ktm2[20+m] = k21[m];
ktm2[24+m] = k30[m];
ktm2[28+m] = k31[m];
}
k00 += 4;
k01 += 4;
k10 += 4;
k11 += 4;
k20 += 4;
k21 += 4;
k30 += 4;
k31 += 4;
ktm2 += 32;
#endif // __ARM_NEON
}
}
// Remaining single input channel, if inch is odd.
for (; q<inch; q++)
{
const float* k00 = kernel0_tm.row(q);
const float* k10 = kernel1_tm.row(q);
const float* k20 = kernel2_tm.row(q);
const float* k30 = kernel3_tm.row(q);
for (int r=0; r<16; r++)
{
#if __ARM_NEON
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%1], #16 \n"
"ld1 {v1.4s}, [%2], #16 \n"
"st1 {v0.4s, v1.4s}, [%0], #32 \n"
"ld1 {v0.4s}, [%3], #16 \n"
"ld1 {v1.4s}, [%4], #16 \n"
"st1 {v0.4s, v1.4s}, [%0], #32 \n"
: "=r"(ktm2), // %0
"=r"(k00), // %1
"=r"(k10), // %2
"=r"(k20), // %3
"=r"(k30) // %4
: "0"(ktm2),
"1"(k00),
"2"(k10),
"3"(k20),
"4"(k30)
: "cc", "memory", "v0", "v1"
);
#else
asm volatile(
"vld1.f32 {d0-d1}, [%1 :128]! \n"
"vld1.f32 {d2-d3}, [%2 :128]! \n"
"vst1.f32 {d0-d3}, [%0 :128]! \n"
"vld1.f32 {d0-d1}, [%3 :128]! \n"
"vld1.f32 {d2-d3}, [%4 :128]! \n"
"vst1.f32 {d0-d3}, [%0 :128]! \n"
: "=r"(ktm2), // %0
"=r"(k00), // %1
"=r"(k10), // %2
"=r"(k20), // %3
"=r"(k30) // %4
: "0"(ktm2),
"1"(k00),
"2"(k10),
"3"(k20),
"4"(k30)
: "cc", "memory", "q0", "q1"
);
#endif // __aarch64__
#else
for (int m=0; m<4; m++)
{
ktm2[0 +m] = k00[m];
ktm2[4 +m] = k10[m];
ktm2[8 +m] = k20[m];
ktm2[12+m] = k30[m];
}
k00 += 4;
k10 += 4;
k20 += 4;
k30 += 4;
ktm2 += 16;
#endif // __ARM_NEON
}
}
}
// Tail output channels: copied one at a time (not interleaved) into the last
// channel of kernel_tm2, each occupying 8*8*inch floats.
#pragma omp parallel for
for (int p = remain_outch_start; p<outch; p++)
{
float* ktm2 = (float*)kernel_tm2.channel(nn_outch) + 8*8 * inch * (p-remain_outch_start);
const Mat kernel0_tm = kernel_tm.channel(p);
int q = 0;
for (; q<inch; q++)
{
const float* k00 = kernel0_tm.row(q);
for (int r=0; r<16; r++)
{
#if __ARM_NEON
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%1], #16 \n"
"st1 {v0.4s}, [%0], #16 \n"
: "=r"(ktm2), // %0
"=r"(k00) // %1
: "0"(ktm2),
"1"(k00)
: "cc", "memory", "v0"
);
#else
asm volatile(
"vld1.f32 {d0-d1}, [%1 :128]! \n"
"vst1.f32 {d0-d1}, [%0 :128]! \n"
: "=r"(ktm2), // %0
"=r"(k00) // %1
: "0"(ktm2),
"1"(k00)
: "cc", "memory", "q0"
);
#endif // __aarch64__
#else
for (int m=0; m<4; m++)
{
ktm2[m] = k00[m];
}
k00 += 4;
ktm2 += 4;
#endif // __ARM_NEON
}
}
}
}
}
|
GB_unop__identity_bool_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_bool_fc32)
// op(A') function: GB (_unop_tran__identity_bool_fc32)
// C type: bool
// A type: GxB_FC32_t
// cast: bool cij = (crealf (aij) != 0) || (cimagf (aij) != 0)
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = (crealf (aij) != 0) || (cimagf (aij) != 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = (crealf (aij) != 0) || (cimagf (aij) != 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the IDENTITY unary operator element-wise, casting GxB_FC32_t inputs
// to bool: Cx [p] = (creal(Ax [p]) != 0 || cimag(Ax [p]) != 0).
// Ab, if non-NULL, is the bitmap of A; entries with Ab [p] == 0 are skipped.
// Work is split statically across nthreads OpenMP threads.
GrB_Info GB (_unop_apply__identity_bool_fc32)
(
bool *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense/sparse case: every one of the anz entries is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
bool z = (crealf (aij) != 0) || (cimagf (aij) != 0) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
bool z = (crealf (aij) != 0) || (cimagf (aij) != 0) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (cast (A')): transpose A, casting FC32 values to bool.
// The actual transpose loop lives in the shared template GB_unop_transpose.c,
// which uses the GB_CAST_OP / GB_GETA macros defined above.
GrB_Info GB (_unop_tran__identity_bool_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces, // per-workspace buffers for the transpose
const int64_t *restrict A_slice, // how A's entries are sliced across tasks
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mixed_tentusscher_myo_epi_2004_S2_11.c | // Scenario 2 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S2_11.h"
// Report the model's metadata to the solver: the resting potential
// (INITIAL_V) and the number of state variables (NEQ), each only when
// the corresponding flag is set by the caller.
GET_CELL_MODEL_DATA(init_cell_model_data)
{
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
// Load the initial state vector for one cell. extra_data must carry the
// myocardium/epicardium mask (0 = myocardium, nonzero = epicardium); the
// appropriate precomputed steady-state vector is copied into sv[0..NEQ-1].
// Aborts via print_to_stderr_and_file_and_exit when no mask is provided.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
static bool first_call = true;
// NOTE(review): first_call is written without synchronization; assumed to
// be invoked from a single thread during setup -- confirm with the caller.
if(first_call)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
first_call = false;
}
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
// Initial conditions for TenTusscher myocardium
if (mapping[sv_id] == 0)
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
// Initial conditions for TenTusscher epicardium
else
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5169285143903,0.00130428563365721,0.778447364000152,0.778294777556242,0.000176052463458467,0.484556008731646,0.00295106634206365,0.999998331220203,1.95009865571766e-08,1.90405217604297e-05,0.999773931735328,1.00732337673749,0.999997839287066,3.97912244489960e-05,0.947578224058516,9.53631582857868,139.823425609239};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
}
// Advance all requested cells by num_steps ODE sub-steps of size dt.
// extra_data carries the myo/epi mask; cells_to_solve (optional) maps loop
// index i to the state-vector id sv_id. Each cell's 17 state variables live
// at sv + sv_id*NEQ. Cells are independent, so the loop is parallelized;
// sv_id is explicitly privatized since it is declared outside the loop.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++)
{
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = (uint32_t )i;
for (int j = 0; j < num_steps; ++j)
{
// NOTE(review): the mask is indexed with i here, but with sv_id in
// set_model_initial_conditions_cpu; the two differ whenever
// cells_to_solve is non-NULL -- confirm which index is intended.
if (mapping[i] == 0)
solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
else
solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
    // Snapshot the current state, evaluate one myocardium update step via
    // RHS_cpu_myo, and copy the advanced state back into sv in place.
    real state[NEQ];
    real next_state[NEQ];

    for (int k = 0; k < NEQ; k++)
        state[k] = sv[k];

    RHS_cpu_myo(state, next_state, stim_current, dt);

    for (int k = 0; k < NEQ; k++)
        sv[k] = next_state[k];
}
// One update step of the TenTusscher 2004 myocardium cell model.
// Inputs: sv = current state (17 variables, see unpacking below),
// stim_current = external stimulus, dt = step size.
// Output: rDY_ holds the UPDATED state, not time derivatives -- gates use a
// Rush-Larsen style exponential update (e.g. rDY_[1] below) and the voltage
// uses a forward-Euler step (rDY_[0] = svolt + dt*(-sItot)); concentrations
// are advanced in place and copied out at the end.
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents: Nernst/reversal potentials and rectifier terms
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations (Ca buffering solved via the analytic quadratic root)
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates (Rush-Larsen exponential integration toward steady state)
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// FCa and G gates may only move when their update would not increase them
// while the membrane is depolarized (svolt > -37 mV)
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
    // Snapshot the current state, evaluate one epicardium update step via
    // RHS_cpu_epi, and copy the advanced state back into sv in place.
    real state[NEQ];
    real next_state[NEQ];

    for (int k = 0; k < NEQ; k++)
        state[k] = sv[k];

    RHS_cpu_epi(state, next_state, stim_current, dt);

    for (int k = 0; k < NEQ; k++)
        sv[k] = next_state[k];
}
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={13.9822763642886,0.000336521649878696,0.000144542916642332,0.000516942526086760,0.253138096656416,0.171109018622005,0.130336142672705,3.88071468613803,0.0154855862471817,2.16547576686118,1091.40643117116,0.000575140596221629,0.180541766553447,0.0183755879605413,0.00807832472755813,1.82509834179719e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
mandel-omp-copy.c | /*
* Sequential Mandelbrot program
*
* This program computes and displays all or part of the Mandelbrot
* set. By default, it examines all points in the complex plane
* that have both real and imaginary parts between -2 and 2.
* Command-line parameters allow zooming in on a specific part of
* this range.
*
* Usage:
* mandel [-i maxiter -c x0 y0 -s size -w windowsize]
* where
* maxiter denotes the maximum number of iterations at each point -- by default 1000
* x0, y0, and size specify the range to examine (a square
* centered at (x0 + iy0) of size 2*size by 2*size -- by default,
* a square of size 4 by 4 centered at the origin)
 * windowsize denotes the size of the image (display window) to compute
*
* Input: none, except the optional command-line arguments
* Output: a graphical display as described in Wilkinson & Allen,
* displayed using the X Window system, plus text output to
* standard output showing the above parameters, plus execution
* time in seconds.
*
* Code based on the original code from Web site for Wilkinson and Allen's
* text on parallel programming:
* http://www.cs.uncc.edu/~abw/parallel/par_prog/
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif
#include <sys/time.h>
/* Return the current wall-clock time expressed in microseconds.
 * Used by the START/STOP_COUNT_TIME macros to measure elapsed time. */
double getusec_() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec * 1e6 + (double)now.tv_usec;
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6fs\n",(_m), stamp);
/* Default values for things. */
#define N 2 /* size of problem space (x, y from -N to N) */
#define NPIXELS 800 /* size of display window in pixels */
int row, col; // variables used to traverse the problem space
/* Structure definition for complex numbers */
typedef struct {
double real, imag;
} complex;
#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h" /* has setup(), interact() */
#endif
/* Compute the Mandelbrot escape-iteration count for every pixel of a
 * height x width image covering the complex rectangle whose lower-left
 * corner is (real_min, imag_min), stepping by scale_real/scale_imag per
 * pixel.  Iterates z <- z^2 + c from z = 0 until |z| > 2 (divergence is
 * then guaranteed) or maxiter iterations are reached.
 *
 * Display build (_DISPLAY_): plots each point, color-scaled from the
 * iteration count.  Non-display build: stores the count in output[row][col].
 *
 * Fix: the loop counters were file-scope globals (int row, col), which made
 * the function non-reentrant and a data race if the commented-out OpenMP
 * pragmas were enabled; they are now local, so the pragmas are safe to
 * re-enable. */
void mandelbrot(int height,
                int width,
                double real_min,
                double imag_min,
                double scale_real,
                double scale_imag,
                int maxiter,
#if _DISPLAY_
                int setup_return,
                Display *display,
                Window win,
                GC gc,
                double scale_color,
                double min_color)
#else
                int ** output)
#endif
{
    /* Calculate points and save/display */
    //#pragma omp for schedule(runtime)
    for (int row = 0; row < height; ++row) {
        //#pragma omp task
        for (int col = 0; col < width; ++col) {
            /* Scale display coordinates to actual region; height-1-row so
             * the y axis displays with larger values at the top. */
            double c_real = real_min + (double) col * scale_real;
            double c_imag = imag_min + (double) (height - 1 - row) * scale_imag;
            /* Calculate z0, z1, .... until divergence or maximum iterations */
            double z_real = 0.0, z_imag = 0.0;
            double lengthsq;
            int k = 0;
            do {
                double temp = z_real * z_real - z_imag * z_imag + c_real;
                z_imag = 2.0 * z_real * z_imag + c_imag;
                z_real = temp;
                lengthsq = z_real * z_real + z_imag * z_imag;
                ++k;
            } while (lengthsq < 4.0 && k < maxiter); /* 4.0 = escape radius squared
                                                      * (was N*N with N == 2) */
#if _DISPLAY_
            /* Scale color and display point */
            long color = (long) ((k - 1) * scale_color) + min_color;
            if (setup_return == EXIT_SUCCESS) {
                #pragma omp critical
                {
                    XSetForeground (display, gc, color);
                    XDrawPoint (display, win, gc, col, row);
                }
            }
#else
            output[row][col] = k;
#endif
        }
    }
}
/* Entry point: parse command-line options, compute (and optionally display
 * or dump to disk) the Mandelbrot image, and report the elapsed time.
 *
 * Fixes: release the output image and close the dump file, check malloc,
 * compare fwrite's size_t result against an unsigned width, return a value
 * on every path, and make the display-setup failure message match the fact
 * that the program exits. */
int main(int argc, char *argv[]) {
    int maxiter = 1000;
    double real_min;
    double real_max;
    double imag_min;
    double imag_max;
    int width = NPIXELS;        /* dimensions of display window */
    int height = NPIXELS;
    double size = N, x0 = 0, y0 = 0;
#if _DISPLAY_
    Display *display;
    Window win;
    GC gc;
    int setup_return;
    long min_color = 0, max_color = 0;
    double scale_color;
#else
    int ** output;
    FILE *fp = NULL;
#endif
    double scale_real, scale_imag;

    /* Process command-line arguments */
    for (int i = 1; i < argc; i++) {
        if (strcmp(argv[i], "-i") == 0) {
            maxiter = atoi(argv[++i]);
        }
        else if (strcmp(argv[i], "-w") == 0) {
            width = atoi(argv[++i]);
            height = width;
        }
        else if (strcmp(argv[i], "-s") == 0) {
            size = atof(argv[++i]);
        }
#if !_DISPLAY_
        else if (strcmp(argv[i], "-o") == 0) {
            if ((fp = fopen("mandel.out", "wb")) == NULL) {
                fprintf(stderr, "Unable to open file\n");
                return EXIT_FAILURE;
            }
        }
#endif
        else if (strcmp(argv[i], "-c") == 0) {
            x0 = atof(argv[++i]);
            y0 = atof(argv[++i]);
        }
        else {
#if _DISPLAY_
            fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
            fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
            fprintf(stderr, "       -o to write computed image to disk (default no file generated)\n");
#endif
            fprintf(stderr, "       -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
            fprintf(stderr, "       -w to specify the size of the display window (default 800x800 pixels)\n");
#else
            fprintf(stderr, "       -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
            fprintf(stderr, "       -c to specify the center x0+iy0 of the square to compute (default origin)\n");
            fprintf(stderr, "       -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
            return EXIT_FAILURE;
        }
    }

    /* The examined square is centered at (x0, y0) with half-side `size` */
    real_min = x0 - size;
    real_max = x0 + size;
    imag_min = y0 - size;
    imag_max = y0 + size;

    /* Produce text output */
    fprintf(stdout, "\n");
    fprintf(stdout, "Mandelbrot program\n");
    fprintf(stdout, "center = (%g, %g), size = %g\n",
            (real_max + real_min)/2, (imag_max + imag_min)/2,
            (real_max - real_min)/2);
    fprintf(stdout, "maximum iterations = %d\n", maxiter);
    fprintf(stdout, "\n");

#if _DISPLAY_
    /* Initialize for graphical display */
    setup_return =
        setup(width, height, &display, &win, &gc, &min_color, &max_color);
    if (setup_return != EXIT_SUCCESS) {
        fprintf(stderr, "Unable to initialize display, exiting\n");
        return EXIT_FAILURE;
    }
#else
    /* Allocate the image as one row-pointer array plus one buffer per row */
    output = malloc(height * sizeof(int *));
    if (output == NULL) {
        fprintf(stderr, "Out of memory\n");
        return EXIT_FAILURE;
    }
    for (int row = 0; row < height; ++row) {
        output[row] = malloc(width * sizeof(int));
        if (output[row] == NULL) {
            fprintf(stderr, "Out of memory\n");
            return EXIT_FAILURE;
        }
    }
#endif

    /* Compute factors to scale computational region to window */
    scale_real = (double) (real_max - real_min) / (double) width;
    scale_imag = (double) (imag_max - imag_min) / (double) height;

#if _DISPLAY_
    /* Compute factor for color scaling */
    scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif

    /* Start timing */
    double stamp;
    START_COUNT_TIME;

#if _DISPLAY_
    mandelbrot(height, width, real_min, imag_min, scale_real, scale_imag, maxiter,
               setup_return, display, win, gc, scale_color, min_color);
#else
    mandelbrot(height, width, real_min, imag_min, scale_real, scale_imag, maxiter,
               output);
#endif

    /* End timing */
    STOP_COUNT_TIME("Total execution time");

    /* Be sure all output is written */
#if _DISPLAY_
    if (setup_return == EXIT_SUCCESS) {
        XFlush (display);
    }
#else
    if (fp != NULL)
    {
        for (int row = 0; row < height; ++row)
            if (fwrite(output[row], sizeof(int), width, fp) != (size_t) width) {
                fprintf(stderr, "Output file not written correctly\n");
            }
    }
#endif

#if _DISPLAY_
    /* Wait for user response, then exit program */
    if (setup_return == EXIT_SUCCESS) {
        interact(display, &win, width, height,
                 real_min, real_max, imag_min, imag_max);
    }
#else
    /* Release the dump file and the image before exiting */
    if (fp != NULL)
        fclose(fp);
    for (int row = 0; row < height; ++row)
        free(output[row]);
    free(output);
#endif
    return EXIT_SUCCESS;
}
|
IndexLinear.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/IndexLinear.c"
#else
#ifdef _OPENMP
#include <omp.h>
#endif
/* Threshold used to trigger multithreading */
#ifndef THNN_SPARSE_OMP_THRESHOLD
#define THNN_SPARSE_OMP_THRESHOLD 100000
#endif
/* Threshold used to trigger BLAS axpy call */
#ifndef THNN_SPARSE_OUTDIM_THRESHOLD
#define THNN_SPARSE_OUTDIM_THRESHOLD 49
#endif
/* sign MACRO */
#ifndef THNN_INDEXLINEAR_SIGN
#define THNN_INDEXLINEAR_SIGN(a) ( ( (a) < 0 ) ? -1 : ( (a) > 0 ) )
#endif
/* Sanity check: keys and values must be parallel 1-D tensors holding the
 * same number of elements (one value per key). */
static bool THNN_(checkKeysValues)(THLongTensor* keys, THTensor* values)
{
  if (THLongTensor_nDimensionLegacyAll(keys) != 1)
    return false;
  if (THTensor_(nDimensionLegacyAll)(values) != 1)
    return false;
  return THLongTensor_size(keys, 0) == THTensor_(nElement)(values);
}
/* Forward pass of the sparse IndexLinear layer.
 *
 * keys/values hold the concatenated sparse (index, value) entries of the
 * whole batch; sizes[j] is the number of entries belonging to sample j and
 * cumSumSizes its running sum, used to locate each sample's slice.  For each
 * sample the code accumulates weight[key + keysOffset] * value and adds the
 * bias, producing a (batchSize x outDim) output.
 *
 * If weight has extra columns (maxNormalize = woutDim - outDim > 0), values
 * are normalized before use.  Per weight row, 4 bookkeeping slots are kept
 * at woffset..woffset+3 (grounded in the updates below):
 *   [0] running max |value| seen for this key (raised only when train != 0)
 *   [1] reciprocal of that max
 *   [2] per-key update scale (reset to 1 here; consumed by updateParameters)
 *   [3] additive offset applied after normalization
 * Normalized values are stored in normalizedValues for the backward pass.
 * The outDim == 1 case is specialized to avoid per-element inner loops. */
void THNN_(IndexLinear_updateOutput)(
          THNNState *state,
          THLongTensor *keys,
          int64_t keysOffset,
          THTensor *values,
          THLongTensor *sizes,
          THLongTensor *cumSumSizes,
          THTensor *output,
          THTensor *weight,
          THTensor *bias,
          THTensor *normalizedValues,
          int train)
{
  /* Retrieve all the dimensions of the problem */
  int64_t batchSize = THLongTensor_size(sizes, 0);
  int64_t keysSize = THLongTensor_size(keys, 0);
  int64_t outDim = THTensor_(size)(bias, 0);
  int64_t woutDim = THTensor_(size)(weight, 1);
  /* Number of extra per-row normalization columns in weight (0 = disabled) */
  int maxNormalize = woutDim - outDim;
  int64_t* sizesData = THLongTensor_data(sizes);
  int64_t* cumSumSizesData = THLongTensor_data(cumSumSizes);

  /* Define/resize the normalized values tensor if maxNormalize is > 0 */
  real* normalizedValuesData = NULL;
  if (maxNormalize)
  {
    THTensor_(resize1d)(normalizedValues, keysSize);
    normalizedValuesData = THTensor_(data)(normalizedValues);
  }

  /* Resize the output */
  THTensor_(resize2d)(output, batchSize, outDim);

  /* Access the storage data/strides */
  real* outputData = THTensor_(data)(output);
  real* valuesData = THTensor_(data)(values);
  real* weightData = THTensor_(data)(weight);
  int64_t weightStride0 = weight->stride(0);
  real* biasData = THTensor_(data)(bias);
  int64_t* keysData = THLongTensor_data(keys);

  /* Make sure these inputs are contiguous to accelerate computations */
  THArgCheck(THLongTensor_isContiguous(keys), 1, "keys vector must be contiguous");
  THArgCheck(THTensor_(isContiguous)(values), 3, "values vector must be contiguous");
  THArgCheck(THTensor_(isContiguous)(output), 6, "output vector must be contiguous");
  THArgCheck(THTensor_(isContiguous)(weight), 7, "weight matrix must be contiguous");
  THArgCheck(THTensor_(isContiguous)(bias), 8, "bias vector must be contiguous");
  THArgCheck(THNN_(checkKeysValues)(keys, values), 1, "Keys and values should have the same number of elements");
  THArgCheck(THTensor_(isContiguous)(normalizedValues), 9, "normalizedValues vector must be contiguous");
  int64_t i,j,k;

  /* Separate cases: output dimension is == 1, or > 1
   * This allows for some optimizations. */
  if (outDim == 1)
  {
    /* Seed every output element with the (scalar) bias */
    THVector_(fill)(outputData, *biasData, batchSize);
    if (maxNormalize)
    {
      /* Parallelize on the batch itself */
#pragma omp parallel \
for private(i,j) \
firstprivate(outDim, keysOffset, \
weightData, keysData, \
valuesData, outputData, \
cumSumSizesData, sizesData) \
schedule(static) \
if(keysSize*outDim > THNN_SPARSE_OMP_THRESHOLD && batchSize > 1)
      for (j = 0; j < batchSize; j++)
      {
        real* loutputData = outputData + j;
        real val = 0;
        real absVal = 0;
        /* Start of sample j's slice in keys/values */
        int64_t offset = j == 0 ? 0 : cumSumSizesData[j - 1];
        for (i = 0; i < sizesData[j]; i++)
        {
          int64_t woffset = weightStride0*(keysData[offset] + keysOffset);
          absVal = fabs(valuesData[offset]);
          if (train)
          {
            /* Track the largest magnitude seen and cache its reciprocal */
            if (absVal > weightData[woffset])
            {
              weightData[woffset] = absVal;
              weightData[woffset+1] = 1/absVal;
            }

            /*
             * The following can be used to scale the size of the updates
             * depending on some rule, e.g. the frequency of a feature, ...
             * This is used at update time.
             * TODO: implement a smarter update scale.
             */
            weightData[woffset+2] = 1;
          }
          /* Normalize (clamping out-of-range values to +/-1) then shift */
          normalizedValuesData[offset] = (absVal > weightData[woffset] ? THNN_INDEXLINEAR_SIGN(valuesData[offset]):valuesData[offset]*weightData[woffset+1]) + weightData[woffset+3];
          val += normalizedValuesData[offset] * weightData[woffset+maxNormalize];
          offset++;
        }
        *loutputData += val;
      }
    }
    else
    {
      /* Parallelize on the batch itself */
#pragma omp parallel \
for private(i,j) \
firstprivate(outDim, weightData, \
keysData, valuesData, \
outputData, cumSumSizesData, \
sizesData) \
schedule(static) \
if(keysSize*outDim > THNN_SPARSE_OMP_THRESHOLD && batchSize > 1)
      for (j = 0; j < batchSize; j++)
      {
        int64_t offset = j == 0 ? 0 : cumSumSizesData[j - 1];
        real* loutputData = outputData + j;
        real val = 0;

        for (i = 0; i < sizesData[j]; i++)
        {
          /* Plain sparse dot product: sum w[key] * value */
          val += weightData[weightStride0*(keysData[offset] + keysOffset)] * valuesData[offset];
          offset++;
        }
        *loutputData += val;
      }
    }
  }
  else {
#pragma omp parallel \
for private(i,j,k) \
firstprivate(outDim, weightData, \
keysData, valuesData, \
biasData, outputData, \
cumSumSizesData, sizesData) \
schedule(static) \
if(keysSize*outDim > THNN_SPARSE_OMP_THRESHOLD && batchSize > 1)
    for (j = 0; j < batchSize; j++)
    {
      int64_t offset = j == 0 ? 0 : cumSumSizesData[j - 1];
      real val;
      real* loutputData = outputData + j*outDim;
      real* lweightData = weightData;
      /* Seed the output row with the bias vector */
      memcpy(loutputData, biasData, outDim*sizeof(real));
      for (i = 0; i < sizesData[j]; i++)
      {
        int64_t woffset = weightStride0*(keysData[offset] + keysOffset);
        if (maxNormalize)
        {
          val = valuesData[offset];
          real absVal = fabs(val);
          if (train)
          {
            if (absVal > weightData[woffset])
            {
              weightData[woffset] = absVal;
              weightData[woffset+1] = 1/absVal;
            }

            /*
             * The following can be used to scale the size of the updates
             * depending on some rule, e.g. the frequency of a feature, ...
             * The commented section thereafter is just an example of what can be done:
             *
             *```
             * weightData[woffset+2] = weightData[woffset+2]==0?1:(weightData[woffset+2] / (weightData[woffset+2] + 1));
             * real alpha = 1;
             * real beta = 0.01;
             * real gamma = 1 - 0.000001;
             * real l = weightData[woffset+2]==0?1/gamma:(weightData[woffset+2] - beta) / (alpha - beta);
             * l = gamma*l;
             * weightData[woffset+2] = (alpha-beta)*l + beta;
             * ```
             *
             * TODO: implement a smarter update scale.
             */
            weightData[woffset+2] = 1;
          }

          /* Normalize + Clamp */
          val = (absVal > weightData[woffset] ? THNN_INDEXLINEAR_SIGN(val):val*weightData[woffset+1]) + weightData[woffset+3];
          normalizedValuesData[offset] = val;

          /* Skip the bookkeeping columns to reach the actual weights */
          lweightData = weightData + woffset + maxNormalize;
        }
        else
        {
          val = valuesData[offset];
          lweightData = weightData + woffset;
        }
        /* For large output dims, BLAS axpy beats the scalar loop */
        if (outDim > THNN_SPARSE_OUTDIM_THRESHOLD)
        {
          THBlas_(axpy)(outDim, val, lweightData, 1, loutputData, 1);
        }
        else
        {
          for (k=0; k < outDim; k++)
          {
            loutputData[k] += lweightData[k] * val;
          }
        }
        offset++;
      }
    }
  }
  return;
}
/* SGD parameter update for IndexLinear: applies the dense per-key gradients
 * accumulated by accGradParameters to the rows of weight named by
 * runningKeys (+ keysOffset), plus a bias step and optional sparse weight
 * decay (decay applied only to the touched rows).
 *
 * When maxNormalize (= woutDim - outDim) > 0, gradWeight carries two
 * interleaved blocks per key and the update also adjusts the per-row
 * normalization offset (slot maxNormalize-1) using the cached update scale
 * (slot maxNormalize-2).
 *
 * Deliberately single-threaded: concurrent in-place updates of shared rows
 * would corrupt the result (see comment below).
 *
 * Fix: the THArgCheck messages for arguments 3 (weight) and 4 (bias) were
 * copy-pasted from gradBias; they now name the right tensors. */
void THNN_(IndexLinear_updateParameters)(
          THNNState *state,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *weight,
          THTensor *bias,
          THLongTensor *runningKeys,
          THLongTensor *cumSumSizes,
          int64_t keysOffset,
          accreal weightDecay_,
          accreal learningRate_)
{
  real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_);
  real learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_);
  /* Retrieve all the dimensions of the problem */
  int64_t outDim = THTensor_(size)(bias, 0);
  int64_t woutDim = THTensor_(size)(weight, 1);
  int maxNormalize = woutDim - outDim;
  int64_t keysSize = THLongTensor_size(runningKeys, 0);

  /* Access the storage data/strides */
  real* gradWeightData = THTensor_(data)(gradWeight);
  real* weightData = THTensor_(data)(weight);
  int64_t weightStride0 = weight->stride(0);
  real* gradBiasData = THTensor_(data)(gradBias);
  real* biasData = THTensor_(data)(bias);
  int64_t* keysData = THLongTensor_data(runningKeys);

  /* Make sure these inputs are contiguous to accelerate computations */
  THArgCheck(THTensor_(isContiguous)(gradWeight), 1, "gradWeight must be contiguous");
  THArgCheck(THTensor_(isContiguous)(gradBias), 2, "gradBias vector must be contiguous");
  THArgCheck(THTensor_(isContiguous)(weight), 3, "weight matrix must be contiguous");
  THArgCheck(THTensor_(isContiguous)(bias), 4, "bias vector must be contiguous");
  THArgCheck(THLongTensor_isContiguous(runningKeys), 5, "keys vector must be contiguous");

  int j, k;

  /* Update the bias first */
  THVector_(cadd)(biasData, biasData, gradBiasData, -learningRate, outDim);

  /* Separate cases: output dimension is == 1, or > 1
   * This allows for some optimizations.
   * No multithreading here as this could
   * corrupt the results (hogwild style) */
  if (outDim == 1)
  {
    if (maxNormalize)
    {
      if (weightDecay)
      {
        for (j = 0; j < keysSize; j++)
        {
          /* woffset points just past the normalization slots of this row;
           * gradWeight holds (offset-grad, weight-grad) pairs per key. */
          int64_t woffset = weightStride0*(keysData[j] + keysOffset) + maxNormalize;
          real lr = learningRate*weightData[woffset-2]; /* scale lr by cached update scale */
          weightData[woffset-1] -= weightData[woffset]*gradWeightData[2*j]*lr;
          weightData[woffset] -= gradWeightData[2*j+1]*lr - weightDecay * weightData[woffset-2] * weightData[woffset];
        }
      }
      else
      {
        for (j = 0; j < keysSize; j++)
        {
          int64_t woffset = weightStride0*(keysData[j] + keysOffset) + maxNormalize;
          real lr = learningRate*weightData[woffset-2];
          weightData[woffset-1] -= weightData[woffset]*gradWeightData[2*j]*lr;
          weightData[woffset] -= gradWeightData[2*j+1]*lr;
        }
      }
    }
    else
    {
      if (weightDecay)
      {
        for (j = 0; j < keysSize; j++)
        {
          int64_t woffset = weightStride0*(keysData[j] + keysOffset);
          weightData[woffset] -= gradWeightData[j]*learningRate + weightDecay * weightData[woffset];
        }
      }
      else
      {
        for (j = 0; j < keysSize; j++)
        {
          weightData[weightStride0*(keysData[j] + keysOffset)] -= gradWeightData[j]*learningRate;
        }
      }
    }
  }
  else
  {
    for (j = 0; j < keysSize; j++)
    {
      real lr = learningRate;
      real wd = weightDecay;
      real* lweightData;
      int64_t woffset = weightStride0*(keysData[j] + keysOffset);
      real* lgradWeightData = gradWeightData + j*outDim;
      if (maxNormalize)
      {
        /* gradWeight stores 2*outDim entries per key in this mode:
         * first the raw gradOutput block, then the value-scaled block. */
        lgradWeightData += j*outDim;
        /* weightData[woffset + 2] */
        lweightData = weightData + woffset + maxNormalize - 2;
        lr = lr*lweightData[0];
        wd = weightDecay*lweightData[0];
        /* weightData[woffset + 3] */
        lweightData++;
        /* Update the additive normalization offset from the raw-grad block */
        for (k=0; k < outDim; k++)
        {
          lweightData[0] -= lgradWeightData[k]*lweightData[k+1]*lr;
        }
        lweightData++;
        lgradWeightData += outDim;
      }
      else
      {
        lweightData = weightData + woffset;
      }

      /* We do sparse weight decay.
       * We think it makes more sense. */
      if (weightDecay)
      {
        for (k=0; k < outDim; k++)
        {
          lweightData[k] -= lweightData[k]*wd;
        }
      }

      if (outDim > THNN_SPARSE_OUTDIM_THRESHOLD)
      {
        THBlas_(axpy)(outDim, -lr, lgradWeightData, 1, lweightData, 1);
      }
      else
      {
        for (k=0; k < outDim; k++)
        {
          lweightData[k] -= lgradWeightData[k]*lr;
        }
      }
    }
  }
}
/* Fused backward + update for IndexLinear: applies the gradient of the loss
 * w.r.t. weight and bias directly in place (no separate gradWeight buffer),
 * scaled by `scale`, with optional weight decay.
 *
 * keys/values/sizes describe the batch exactly as in updateOutput.  In the
 * maxNormalize mode the per-row bookkeeping slots (see updateOutput) are
 * consulted: slot [0] at maxNormalize-2 scales the step and is zeroed after
 * the batch, slot [1] at maxNormalize-1 (the additive offset) is updated too.
 *
 * Single-threaded on purpose: rows may repeat across samples, so concurrent
 * in-place updates would corrupt the result. */
void THNN_(IndexLinear_accUpdateGradParameters)(
          THNNState *state,
          THLongTensor *keys,
          int64_t keysOffset,
          THTensor *values,
          THLongTensor *sizes,
          THLongTensor *cumSumSizes,
          THTensor *gradOutput,
          THTensor *weight,
          THTensor *bias,
          accreal weightDecay_,
          accreal scale_)
{
  real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_);
  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  /* Retrieve all the dimensions of the problem */
  int64_t batchSize = THLongTensor_size(sizes, 0);
  int64_t outDim = THTensor_(size)(bias, 0);
  int64_t woutDim = THTensor_(size)(weight, 1);
  int maxNormalize = woutDim - outDim;
  THArgCheck(THNN_(checkKeysValues)(keys, values), 1, "Keys and values should have the same number of elements");

  /* Access the storage data/strides */
  real* gradOutputData = THTensor_(data)(gradOutput);
  real* valuesData =THTensor_(data)(values);
  real* weightData = THTensor_(data)(weight);
  real* biasData = THTensor_(data)(bias);
  int64_t weightStride0 = weight->stride(0);
  int64_t* keysData = THLongTensor_data(keys);
  int64_t* sizesData = THLongTensor_data(sizes);

  /* Make sure these inputs are contiguous to accelerate computations */
  THArgCheck(THLongTensor_isContiguous(keys), 1, "keys vector must be contiguous");
  THArgCheck(THTensor_(isContiguous)(values), 3, "values vector must be contiguous");
  THArgCheck(THTensor_(isContiguous)(gradOutput), 6, "gradOutput vector must be contiguous");
  THArgCheck(THTensor_(isContiguous)(weight), 7, "weight matrix must be contiguous");
  THArgCheck(THTensor_(isContiguous)(bias), 8, "bias matrix must be contiguous");

  int i,j,k;

  /* Separate cases: output dimension is == 1, or > 1
   * This allows for some optimizations.
   * No multithreading here as this could
   * corrupt the results (hogwild style) */
  if (outDim == 1)
  {
    if (maxNormalize)
    {
        int64_t offset = 0;
        for (j = 0; j < batchSize; j++)
        {
          real* lgradOutputData = gradOutputData + j;
          /* Bias step for this sample */
          *biasData -= *lgradOutputData * scale;
          real val = *lgradOutputData * scale;
          for (i = 0; i < sizesData[j]; i++)
          {
            /* idx points at the weight slot; idx-1 is the offset slot,
             * idx-2 the per-key update scale. */
            int64_t idx = weightStride0*(keysData[offset] + keysOffset) + maxNormalize;
            weightData[idx-1] -= weightData[idx]*val*weightData[idx-2];
            weightData[idx] -= (val*valuesData[offset] - weightDecay * weightData[idx])*weightData[idx-2];
            offset++;
          }
        }

        /* Second pass: reset the per-key update scales touched above */
        offset = 0;
        for (j = 0; j < batchSize; j++)
        {
          for (i = 0; i < sizesData[j]; i++)
          {
            int64_t idx = weightStride0*(keysData[offset] + keysOffset) + maxNormalize;
            weightData[idx-2] = 0;
            offset++;
          }
        }
    }
    else
    {
      if (weightDecay)
      {
        int64_t offset = 0;
        for (j = 0; j < batchSize; j++)
        {
          real* lgradOutputData = gradOutputData + j;
          *biasData -= *lgradOutputData * scale;
          real val = *lgradOutputData * scale;
          for (i = 0; i < sizesData[j]; i++)
          {
            int64_t idx = weightStride0*(keysData[offset] + keysOffset);
            weightData[idx] -= val * valuesData[offset] + weightData[idx] * weightDecay;
            offset++;
          }
        }
      }
      else
      {
        int64_t offset = 0;
        for (j = 0; j < batchSize; j++)
        {
          real val = gradOutputData[j] * scale;
          for (i = 0; i < sizesData[j]; i++)
          {
            /* Plain rank-1 sparse update: w[key] -= val * value */
            weightData[(keysData[offset] + keysOffset)*weightStride0] -= val * valuesData[offset];
            offset++;
          }
          *biasData -= val;
        }
      }
    }
  }
  else {
    int64_t offset = 0;
    for (j = 0; j < batchSize; j++)
    {
      real* lgradOutputData = gradOutputData + j*outDim;
      real* lweightData = weightData;
      /* Bias step for this sample's gradOutput row */
      THVector_(cadd)(biasData, biasData, lgradOutputData, -scale, outDim);
      for (i = 0; i < sizesData[j]; i++)
      {
        real val = valuesData[offset] * scale;
        real wd = weightDecay;

        // Max normalize case
        if (maxNormalize)
        {
          /* Position on the update-scale slot; scale both the step and
           * the decay by it, and update the additive offset slot. */
          lweightData = weightData + weightStride0*(keysData[offset] + keysOffset) + (maxNormalize-2);
          val *= lweightData[0];
          wd *= lweightData[0];
          for (k=0; k < outDim; k++)
          {
            lweightData[1] -= lweightData[k+2]*scale*lgradOutputData[k]*lweightData[0];
          }
          lweightData += 2;
        }
        else
        {
          lweightData = weightData + weightStride0*(keysData[offset] + keysOffset);
        }

        /* We do sparse weight decay.
         * We think it makes more sense. */
        if (weightDecay)
        {
          if (outDim > THNN_SPARSE_OUTDIM_THRESHOLD)
          {
            THBlas_(axpy)(outDim, -wd, lweightData, 1, lweightData, 1);
          }
          else
          {
            for (k=0; k < outDim; k++)
            {
              lweightData[k] -= wd * lweightData[k];
            }
          }
        }

        if (outDim > THNN_SPARSE_OUTDIM_THRESHOLD)
        {
          THBlas_(axpy)(outDim, -val, lgradOutputData, 1, lweightData, 1);
        }
        else
        {
          for (k=0; k < outDim; k++)
          {
            lweightData[k] -= val * lgradOutputData[k];
          }
        }
        offset++;
      }
    }

    /* Max Normalize case:
     * Reset the smart update scaling if
     * one does it batch-wise.
     * TODO: Decide what to do with that piece of code.
     * NB: If the code below is uncommented, so should the commented
     * code in IndexLinear:zeroGradParameters() */
    /*
    if (maxNormalize)
    {
      offset = 0;
      for (j = 0; j < batchSize; j++)
      {
        real* lweightData = weightData;
        for (i = 0; i < sizesData[j]; i++)
        {
          real val = valuesData[offset] * scale;
          real wd = weightDecay;

          lweightData = weightData + weightStride0*(keysData[offset] + keysOffset) + (maxNormalize-2);
          lweightData[0] = 0;
          offset++;
        }
      }
    }
    */
  }
  return;
}
/* Gradient accumulation for IndexLinear: fills gradWeight with one dense
 * gradient block per sparse entry (so updateParameters can later apply them
 * to the rows named by the running keys) and accumulates gradBias.
 *
 * gradWeight is resized to (keysSize x outDim) — or (keysSize x 2*outDim)
 * when maxNormalize is on, storing the raw gradOutput block followed by the
 * value-scaled block per entry (same layout updateParameters expects).
 *
 * NOTE(review): weightDecay_ is accepted but never used here (decay is
 * applied at update time), and valuesBuffer is only checked for contiguity.
 * Single-threaded on purpose (see comment below). */
void THNN_(IndexLinear_accGradParameters)(
          THNNState *state,
          THLongTensor *keys,
          int64_t keysOffset,
          THTensor *values,
          THLongTensor *sizes,
          THLongTensor *cumSumSizes,
          THTensor *gradOutput,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *weight,
          THTensor *bias,
          THTensor *valuesBuffer,
          accreal weightDecay_,
          accreal scale_)
{
  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  /* Retrieve all the dimensions of the problem */
  int64_t batchSize = THLongTensor_size(sizes, 0);
  int64_t keysSize = THLongTensor_size(keys, 0);
  int64_t outDim = THTensor_(size)(bias, 0);
  int64_t woutDim = THTensor_(size)(weight, 1);
  /* Flag only: 1 when the weight carries normalization columns */
  int64_t maxNormalize = (woutDim - outDim) > 0 ?1:0;
  THArgCheck(THNN_(checkKeysValues)(keys, values), 1, "Keys and values should have the same number of elements");
  int64_t* sizesData = THLongTensor_data(sizes);

  /* Compute the cumulative sizes */
  THLongTensor* cumSizes = THLongTensor_new();
  THLongTensor_cumsum(cumSizes, sizes, 0);
  int64_t* cumSizesData = THLongTensor_data(cumSizes);

  /* Resize the gradWeight buffer to keep it dense.
   * That speeds up updates A LOT assuming random mem access. */
  THTensor_(resize2d)(gradWeight, keysSize, outDim * (maxNormalize>0?2:1));

  /* Access the storage data/strides */
  real* gradOutputData = THTensor_(data)(gradOutput);
  real* valuesData =THTensor_(data)(values);
  real* gradWeightData = THTensor_(data)(gradWeight);
  real* gradBiasData = THTensor_(data)(gradBias);

  /* Make sure these inputs are contiguous to accelerate computations */
  THArgCheck(THLongTensor_isContiguous(keys), 1, "keys vector must be contiguous");
  THArgCheck(THTensor_(isContiguous)(values), 3, "values vector must be contiguous");
  THArgCheck(THTensor_(isContiguous)(gradOutput), 6, "gradOutput vector must be contiguous");
  THArgCheck(THTensor_(isContiguous)(gradWeight), 7, "gradWeight must be contiguous");
  THArgCheck(THTensor_(isContiguous)(gradBias), 8, "gradBias vector must be contiguous");
  THArgCheck(THTensor_(isContiguous)(weight), 9, "weight must be contiguous");
  THArgCheck(THTensor_(isContiguous)(bias), 10, "bias vector must be contiguous");
  THArgCheck(THTensor_(isContiguous)(valuesBuffer), 11, "valuesBuffer must be contiguous");

  int i,j,k;

  /* Separate cases: output dimension is == 1, or > 1
   * This allows for some optimizations.
   * No multithreading here as this could
   * corrupt the results (hogwild style) */
  if (outDim == 1)
  {
    for (j = 0; j < batchSize; j++)
    {
      /* Start of sample j's slice in keys/values/gradWeight */
      int64_t offset = j==0?0:cumSizesData[j-1];
      real val = gradOutputData[j] * scale;
      real* lgradWeightData = gradWeightData + offset;
      real* lvaluesData = valuesData + offset;
      int64_t end = sizesData[j];

      if (maxNormalize)
      {
        /* Two entries per key: (raw grad, value-scaled grad) */
        lgradWeightData += offset;
        i = 0;
        for(;i < end; i++)
        {
          lgradWeightData[2*i] = val;
          lgradWeightData[2*i+1] = val * lvaluesData[i];
        }
      }
      else
      {
        /* Manually unrolled by 4, with a scalar tail loop */
        i = 0;
        for(;i < end-4; i += 4)
        {
          lgradWeightData[i] = val * lvaluesData[i];
          lgradWeightData[i+1] = val * lvaluesData[i+1];
          lgradWeightData[i+2] = val * lvaluesData[i+2];
          lgradWeightData[i+3] = val * lvaluesData[i+3];
        }

        for(; i < end; i++)
        {
          lgradWeightData[i] = val * lvaluesData[i];
        }
      }
      *gradBiasData += val;
      offset += end;
    }
  }
  else {
    for (j = 0; j < batchSize; j++)
    {
      int64_t offset = j==0?0:cumSizesData[j-1];
      real* lgradOutputData = gradOutputData + j*outDim;
      real* lgradWeightData = gradWeightData;
      /* Accumulate this sample's gradOutput row into gradBias */
      THVector_(cadd)(gradBiasData, gradBiasData, lgradOutputData, scale, outDim);
      for (i = 0; i < sizesData[j]; i++)
      {
        real val = valuesData[offset] * scale;
        lgradWeightData = gradWeightData + offset*outDim;
        if (maxNormalize)
        {
          /* First block per entry: raw gradOutput * scale (unrolled by 4) */
          lgradWeightData += offset*outDim;
          k = 0;
          for(;k < outDim-4; k += 4)
          {
            lgradWeightData[k] = lgradOutputData[k]*scale;
            lgradWeightData[k+1] = lgradOutputData[k+1]*scale;
            lgradWeightData[k+2] = lgradOutputData[k+2]*scale;
            lgradWeightData[k+3] = lgradOutputData[k+3]*scale;
          }

          for(; k < outDim; k++)
          {
            lgradWeightData[k] = lgradOutputData[k]*scale;
          }
          lgradWeightData += outDim;
        }
        /* Second (or only) block: value-scaled gradOutput (unrolled by 4) */
        k = 0;
        for(;k < outDim-4; k += 4)
        {
          lgradWeightData[k] = val * lgradOutputData[k];
          lgradWeightData[k+1] = val * lgradOutputData[k+1];
          lgradWeightData[k+2] = val * lgradOutputData[k+2];
          lgradWeightData[k+3] = val * lgradOutputData[k+3];
        }

        for(; k < outDim; k++)
        {
          lgradWeightData[k] = val * lgradOutputData[k];
        }
        offset++;
      }
    }
  }
  /* Release the locally-allocated cumulative-sizes tensor */
  THLongTensor_free(cumSizes);
  return;
}
#endif
|
c-tree.h | /* Definitions for C parsing and type checking.
Copyright (C) 1987-2020 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_C_TREE_H
#define GCC_C_TREE_H
#include "c-family/c-common.h"
#include "diagnostic.h"
/* struct lang_identifier is private to c-decl.c, but langhooks.c needs to
know how big it is. This is sanity-checked in c-decl.c. */
#define C_SIZEOF_STRUCT_LANG_IDENTIFIER \
(sizeof (struct c_common_identifier) + 3 * sizeof (void *))
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */
#define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE
nonzero if the definition of the type has already started. */
#define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE)
/* In an incomplete RECORD_TYPE, UNION_TYPE or ENUMERAL_TYPE, a list of
variable declarations whose type would be completed by completing
that type. */
#define C_TYPE_INCOMPLETE_VARS(TYPE) \
TYPE_LANG_SLOT_1 (TREE_CHECK4 (TYPE, RECORD_TYPE, UNION_TYPE, \
QUAL_UNION_TYPE, ENUMERAL_TYPE))
/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
keyword. C_RID_CODE (node) is then the RID_* value of the keyword. */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID)
/* Record whether a type or decl was written with nonconstant size.
Note that TYPE_SIZE may have simplified to a constant. */
#define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE)
#define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE)
/* Record whether a type is defined inside a struct or union type.
This is used for -Wc++-compat. */
#define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE)
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was defined without an explicit
return type. */
#define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */
#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)
/* For a PARM_DECL, nonzero if it was declared as an array. */
#define C_ARRAY_PARAMETER(NODE) DECL_LANG_FLAG_0 (NODE)
/* For FUNCTION_DECLs, evaluates true if the decl is built-in but has
been declared. */
#define C_DECL_DECLARED_BUILTIN(EXP) \
DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP))
/* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a
built-in prototype and does not have a non-built-in prototype. */
#define C_DECL_BUILTIN_PROTOTYPE(EXP) \
DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a decl was declared register. This is strictly a
front-end flag, whereas DECL_REGISTER is used for code generation;
they may differ for structures with volatile fields. */
#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP)
/* Record whether a decl was used in an expression anywhere except an
unevaluated operand of sizeof / typeof / alignof. This is only
used for functions declared static but not defined, though outside
sizeof and typeof it is set for other function decls as well. */
#define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a variable has been declared threadprivate by
#pragma omp threadprivate. */
#define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL))
/* Set on VAR_DECLs for compound literals. */
#define C_DECL_COMPOUND_LITERAL_P(DECL) \
DECL_LANG_FLAG_5 (VAR_DECL_CHECK (DECL))
/* Nonzero for a decl which either doesn't exist or isn't a prototype.
N.B. Could be simplified if all built-in decls had complete prototypes
(but this is presently difficult because some of them need FILE*). */
#define C_DECL_ISNT_PROTOTYPE(EXP) \
(EXP == 0 \
|| (!prototype_p (TREE_TYPE (EXP)) \
&& !fndecl_built_in_p (EXP)))
/* For FUNCTION_TYPE, a hidden list of types of arguments. The same as
TYPE_ARG_TYPES for functions with prototypes, but created for functions
without prototypes. */
#define TYPE_ACTUAL_ARG_TYPES(NODE) \
TYPE_LANG_SLOT_1 (FUNCTION_TYPE_CHECK (NODE))
/* For a CONSTRUCTOR, whether some initializer contains a
subexpression meaning it is not a constant expression. */
#define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR))
/* For a SAVE_EXPR, nonzero if the operand of the SAVE_EXPR has already
been folded. */
#define SAVE_EXPR_FOLDED_P(EXP) TREE_LANG_FLAG_1 (SAVE_EXPR_CHECK (EXP))
/* Record parser information about an expression that is irrelevant
for code generation alongside a tree representing its value. */
struct c_expr
{
/* The value of the expression. */
tree value;
/* Record the original unary/binary operator of an expression, which may
have been changed by fold, STRING_CST for unparenthesized string
constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls
(even if parenthesized), for subexpressions, and for non-constant
initializers, or ERROR_MARK for other expressions (including
parenthesized expressions). */
enum tree_code original_code;
/* If not NULL, the original type of an expression. This will
differ from the type of the value field for an enum constant.
The type of an enum constant is a plain integer type, but this
field will be the enum type. */
tree original_type;
/* The source range of this expression. This is redundant
for node values that have locations, but not all node kinds
have locations (e.g. constants, and references to params, locals,
etc), so we stash a copy here. */
source_range src_range;
/* Access to the first and last locations within the source spelling
of this expression. */
location_t get_start () const { return src_range.m_start; }
location_t get_finish () const { return src_range.m_finish; }
/* Best location for diagnostics: the value's own location when it
has one, otherwise a location synthesized from the stashed
source range. */
location_t get_location () const
{
if (EXPR_HAS_LOCATION (value))
return EXPR_LOCATION (value);
else
return make_location (get_start (), get_start (), get_finish ());
}
/* Set the value to error_mark_node whilst ensuring that src_range
is initialized (so callers may still query the range safely). */
void set_error ()
{
value = error_mark_node;
src_range.m_start = UNKNOWN_LOCATION;
src_range.m_finish = UNKNOWN_LOCATION;
}
};
/* Type alias for struct c_expr. This allows to use the structure
inside the VEC types. */
typedef struct c_expr c_expr_t;
/* A kind of type specifier. Note that this information is currently
only used to distinguish tag definitions, tag references and typeof
uses. */
enum c_typespec_kind {
/* No typespec. This appears only in struct c_declspecs. */
ctsk_none,
/* A reserved keyword type specifier. */
ctsk_resword,
/* A reference to a tag, previously declared, such as "struct foo".
This includes where the previous declaration was as a different
kind of tag, in which case this is only valid if shadowing that
tag in an inner scope. */
ctsk_tagref,
/* Likewise, with standard attributes present in the reference. */
ctsk_tagref_attrs,
/* A reference to a tag, not previously declared in a visible
scope. */
ctsk_tagfirstref,
/* Likewise, with standard attributes present in the reference. */
ctsk_tagfirstref_attrs,
/* A definition of a tag such as "struct foo { int a; }". */
ctsk_tagdef,
/* A typedef name. */
ctsk_typedef,
/* An ObjC-specific kind of type specifier. */
ctsk_objc,
/* A typeof specifier, or _Atomic ( type-name ). */
ctsk_typeof
};
/* A type specifier: this structure is created in the parser and
passed to declspecs_add_type only. */
struct c_typespec {
/* What kind of type specifier this is. */
enum c_typespec_kind kind;
/* Whether the expression has operands suitable for use in constant
expressions. */
bool expr_const_operands;
/* The specifier itself: a tree representing the type, tag or
typedef named. */
tree spec;
/* An expression to be evaluated before the type specifier, in the
case of typeof specifiers, or NULL otherwise or if no such
expression is required for a particular typeof specifier. In
particular, when typeof is applied to an expression of variably
modified type, that expression must be evaluated in order to
determine array sizes that form part of the type, but the
expression itself (as opposed to the array sizes) forms no part
of the type and so needs to be recorded separately. */
tree expr;
};
/* A storage class specifier. */
enum c_storage_class {
csc_none,
csc_auto,
csc_extern,
csc_register,
csc_static,
csc_typedef
};
/* A type specifier keyword "void", "_Bool", "char", "int", "float",
"double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum",
or none of these. */
enum c_typespec_keyword {
cts_none,
cts_void,
cts_bool,
cts_char,
cts_int,
cts_float,
cts_int_n,
cts_double,
cts_dfloat32,
cts_dfloat64,
cts_dfloat128,
cts_floatn_nx,
cts_fract,
cts_accum,
cts_auto_type
};
/* This enum lists all the possible declarator specifiers, storage
class or attribute that a user can write. There is at least one
enumerator per possible declarator specifier in the struct
c_declspecs below.
It is used to index the array of declspec locations in struct
c_declspecs. */
enum c_declspec_word {
cdw_typespec /* A catch-all for a typespec. */,
cdw_storage_class /* A catch-all for a storage class */,
cdw_attributes,
cdw_typedef,
cdw_explicit_signed,
cdw_deprecated,
cdw_default_int,
cdw_long,
cdw_long_long,
cdw_short,
cdw_signed,
cdw_unsigned,
cdw_complex,
cdw_inline,
cdw_noreturn,
cdw_thread,
cdw_const,
cdw_volatile,
cdw_restrict,
cdw_atomic,
cdw_saturating,
cdw_alignas,
cdw_address_space,
cdw_gimple,
cdw_rtl,
cdw_number_of_elements /* This one must always be the last
enumerator. */
};
/* Which of the __GIMPLE / __RTL function-body markers (if any) was
given in the declaration specifiers. */
enum c_declspec_il {
/* Neither __GIMPLE nor __RTL. */
cdil_none,
cdil_gimple, /* __GIMPLE */
cdil_gimple_cfg, /* __GIMPLE(cfg) */
cdil_gimple_ssa, /* __GIMPLE(ssa) */
cdil_rtl /* __RTL */
};
/* A sequence of declaration specifiers in C. When a new declaration
specifier is added, please update the enum c_declspec_word above
accordingly. */
struct c_declspecs {
/* The location of each kind of declaration specifier seen, indexed
by enum c_declspec_word. */
location_t locations[cdw_number_of_elements];
/* The type specified, if a single type specifier such as a struct,
union or enum specifier, typedef name or typeof specifies the
whole type, or NULL_TREE if none or a keyword such as "void" or
"char" is used. Does not include qualifiers. */
tree type;
/* Any expression to be evaluated before the type, from a typeof
specifier. */
tree expr;
/* The attributes from a typedef decl. */
tree decl_attr;
/* When parsing, the GNU attributes and prefix standard attributes.
Outside the parser, this will be NULL; attributes (possibly from
multiple lists) will be passed separately. */
tree attrs;
/* When parsing, postfix standard attributes (which appertain to the
type specified by the preceding declaration specifiers, unlike
prefix standard attributes which appertain to the declaration or
declarations as a whole). */
tree postfix_attrs;
/* The pass to start compiling a __GIMPLE or __RTL function with. */
char *gimple_or_rtl_pass;
/* ENTRY BB count. */
profile_count entry_bb_count;
/* The base-2 log of the greatest alignment required by an _Alignas
specifier, in bytes, or -1 if no such specifiers with nonzero
alignment. */
int align_log;
/* For the __intN declspec, this stores the index into the int_n_* arrays. */
int int_n_idx;
/* For the _FloatN and _FloatNx declspec, this stores the index into
the floatn_nx_types array. */
int floatn_nx_idx;
/* The storage class specifier, or csc_none if none. */
enum c_storage_class storage_class;
/* Any type specifier keyword used such as "int", not reflecting
modifiers such as "short", or cts_none if none. */
ENUM_BITFIELD (c_typespec_keyword) typespec_word : 8;
/* The kind of type specifier if one has been seen, ctsk_none
otherwise. */
ENUM_BITFIELD (c_typespec_kind) typespec_kind : 4;
ENUM_BITFIELD (c_declspec_il) declspec_il : 3;
/* Whether any expressions in typeof specifiers may appear in
constant expressions. */
BOOL_BITFIELD expr_const_operands : 1;
/* Whether any declaration specifiers have been seen at all. */
BOOL_BITFIELD declspecs_seen_p : 1;
/* Whether any declaration specifiers other than standard attributes
have been seen at all. If only standard attributes have been
seen, this is an attribute-declaration. */
BOOL_BITFIELD non_std_attrs_seen_p : 1;
/* Whether something other than a storage class specifier or
attribute has been seen. This is used to warn for the
obsolescent usage of storage class specifiers other than at the
start of the list. (Doing this properly would require function
specifiers to be handled separately from storage class
specifiers.) */
BOOL_BITFIELD non_sc_seen_p : 1;
/* Whether the type is specified by a typedef or typeof name. */
BOOL_BITFIELD typedef_p : 1;
/* Whether the type is explicitly "signed" or specified by a typedef
whose type is explicitly "signed". */
BOOL_BITFIELD explicit_signed_p : 1;
/* Whether the specifiers include a deprecated typedef. */
BOOL_BITFIELD deprecated_p : 1;
/* Whether the type defaulted to "int" because there were no type
specifiers. */
BOOL_BITFIELD default_int_p : 1;
/* Whether "long" was specified. */
BOOL_BITFIELD long_p : 1;
/* Whether "long" was specified more than once. */
BOOL_BITFIELD long_long_p : 1;
/* Whether "short" was specified. */
BOOL_BITFIELD short_p : 1;
/* Whether "signed" was specified. */
BOOL_BITFIELD signed_p : 1;
/* Whether "unsigned" was specified. */
BOOL_BITFIELD unsigned_p : 1;
/* Whether "complex" was specified. */
BOOL_BITFIELD complex_p : 1;
/* Whether "inline" was specified. */
BOOL_BITFIELD inline_p : 1;
/* Whether "_Noreturn" was specified. */
BOOL_BITFIELD noreturn_p : 1;
/* Whether "__thread" or "_Thread_local" was specified. */
BOOL_BITFIELD thread_p : 1;
/* Whether "__thread" rather than "_Thread_local" was specified. */
BOOL_BITFIELD thread_gnu_p : 1;
/* Whether "const" was specified. */
BOOL_BITFIELD const_p : 1;
/* Whether "volatile" was specified. */
BOOL_BITFIELD volatile_p : 1;
/* Whether "restrict" was specified. */
BOOL_BITFIELD restrict_p : 1;
/* Whether "_Atomic" was specified. */
BOOL_BITFIELD atomic_p : 1;
/* Whether "_Sat" was specified. */
BOOL_BITFIELD saturating_p : 1;
/* Whether any alignment specifier (even with zero alignment) was
specified. */
BOOL_BITFIELD alignas_p : 1;
/* The address space that the declaration belongs to. */
addr_space_t address_space;
};
/* The various kinds of declarators in C. */
enum c_declarator_kind {
/* An identifier. */
cdk_id,
/* A function. */
cdk_function,
/* An array. */
cdk_array,
/* A pointer. */
cdk_pointer,
/* A parenthesized declarator with nested attributes. */
cdk_attrs
};
/* A tag (struct, union or enum) recorded for a function's parameter
list; see the "tags" vector in struct c_arg_info below. */
struct c_arg_tag {
/* The argument name. */
tree id;
/* The type of the argument. */
tree type;
};
/* Information about the parameters in a function declarator. */
struct c_arg_info {
/* A list of parameter decls. */
tree parms;
/* A list of structure, union and enum tags defined (see struct
c_arg_tag above). */
vec<c_arg_tag, va_gc> *tags;
/* A list of argument types to go in the FUNCTION_TYPE. */
tree types;
/* A list of non-parameter decls (notably enumeration constants)
defined with the parameters. */
tree others;
/* A compound expression of VLA sizes from the parameters, or NULL.
In a function definition, these are used to ensure that
side-effects in sizes of arrays converted to pointers (such as a
parameter int i[n++]) take place; otherwise, they are
ignored. */
tree pending_sizes;
/* True when these arguments had [*]. */
BOOL_BITFIELD had_vla_unspec : 1;
};
/* A declarator. */
struct c_declarator {
/* The kind of declarator. */
enum c_declarator_kind kind;
location_t id_loc; /* Currently only set for cdk_id, cdk_array. */
/* Except for cdk_id, the contained declarator. For cdk_id, NULL. */
struct c_declarator *declarator;
/* Kind-specific data; the active member is selected by KIND. */
union {
/* For identifiers. */
struct {
/* An IDENTIFIER_NODE, or NULL_TREE if an abstract
declarator. */
tree id;
/* Any attributes (which apply to the declaration rather than to
the type described by the outer declarators). */
tree attrs;
} id;
/* For functions. */
struct c_arg_info *arg_info;
/* For arrays. */
struct {
/* The array dimension, or NULL for [] and [*]. */
tree dimen;
/* The qualifiers inside []. */
int quals;
/* The attributes (currently ignored) inside []. */
tree attrs;
/* Whether [static] was used. */
BOOL_BITFIELD static_p : 1;
/* Whether [*] was used. */
BOOL_BITFIELD vla_unspec_p : 1;
} array;
/* For pointers, the qualifiers on the pointer type. */
int pointer_quals;
/* For attributes. */
tree attrs;
} u;
};
/* A type name: declaration specifiers plus a declarator. */
struct c_type_name {
/* The declaration specifiers. */
struct c_declspecs *specs;
/* The declarator. */
struct c_declarator *declarator;
};
/* A parameter. */
struct c_parm {
/* The declaration specifiers, minus any prefix attributes. */
struct c_declspecs *specs;
/* The prefix attributes, kept separately from SPECS. */
tree attrs;
/* The declarator. */
struct c_declarator *declarator;
/* The location of the parameter. */
location_t loc;
};
/* Used when parsing an enum. Initialized by start_enum. */
struct c_enum_contents
{
/* While defining an enum type, this is 1 plus the last enumerator
constant value, i.e. the default value for the next enumerator. */
tree enum_next_value;
/* Nonzero means that there was overflow computing enum_next_value. */
int enum_overflow;
};
/* A type of reference to a static identifier in an inline
function. */
enum c_inline_static_type {
/* An identifier with internal linkage used in a function that may
be an inline definition (i.e., a file-scope static). */
csi_internal,
/* A modifiable object with static storage duration defined in a
function that may be an inline definition (i.e., a local
static). */
csi_modifiable
};
/* in c-parser.c */
extern void c_parse_init (void);
extern bool c_keyword_starts_typename (enum rid keyword);
/* in c-aux-info.c */
extern void gen_aux_info_record (tree, int, int, int);
/* in c-decl.c */
struct c_spot_bindings;
class c_struct_parse_info;
extern struct obstack parser_obstack;
extern tree c_break_label;
extern tree c_cont_label;
extern bool global_bindings_p (void);
extern tree pushdecl (tree);
extern void push_scope (void);
extern tree pop_scope (void);
extern void c_bindings_start_stmt_expr (struct c_spot_bindings *);
extern void c_bindings_end_stmt_expr (struct c_spot_bindings *);
extern void record_inline_static (location_t, tree, tree,
enum c_inline_static_type);
extern void c_init_decl_processing (void);
extern void c_print_identifier (FILE *, tree, int);
extern int quals_from_declspecs (const struct c_declspecs *);
extern struct c_declarator *build_array_declarator (location_t, tree,
struct c_declspecs *,
bool, bool);
extern tree build_enumerator (location_t, location_t, struct c_enum_contents *,
tree, tree);
extern tree check_for_loop_decls (location_t, bool);
extern void mark_forward_parm_decls (void);
extern void declare_parm_level (void);
extern void undeclared_variable (location_t, tree);
extern tree lookup_label_for_goto (location_t, tree);
extern tree declare_label (tree);
extern tree define_label (location_t, tree);
extern struct c_spot_bindings *c_get_switch_bindings (void);
extern void c_release_switch_bindings (struct c_spot_bindings *);
extern bool c_check_switch_jump_warnings (struct c_spot_bindings *,
location_t, location_t);
extern void finish_decl (tree, location_t, tree, tree, tree);
extern tree finish_enum (tree, tree, tree);
extern void finish_function (location_t = input_location);
extern tree finish_struct (location_t, tree, tree, tree,
class c_struct_parse_info *);
extern tree c_simulate_enum_decl (location_t, const char *,
vec<string_int_pair>);
extern struct c_arg_info *build_arg_info (void);
extern struct c_arg_info *get_parm_info (bool, tree);
extern tree grokfield (location_t, struct c_declarator *,
struct c_declspecs *, tree, tree *);
extern tree groktypename (struct c_type_name *, tree *, bool *);
extern tree grokparm (const struct c_parm *, tree *);
extern tree implicitly_declare (location_t, tree);
extern void keep_next_level (void);
extern void pending_xref_error (void);
extern void c_push_function_context (void);
extern void c_pop_function_context (void);
extern void push_parm_decl (const struct c_parm *, tree *);
extern struct c_declarator *set_array_declarator_inner (struct c_declarator *,
struct c_declarator *);
extern tree c_builtin_function (tree);
extern tree c_builtin_function_ext_scope (tree);
extern tree c_simulate_builtin_function_decl (tree);
extern void c_warn_unused_attributes (tree);
extern tree c_warn_type_attributes (tree);
extern void shadow_tag (const struct c_declspecs *);
extern void shadow_tag_warned (const struct c_declspecs *, int);
extern tree start_enum (location_t, struct c_enum_contents *, tree);
extern bool start_function (struct c_declspecs *, struct c_declarator *, tree);
extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool,
tree);
extern tree start_struct (location_t, enum tree_code, tree,
class c_struct_parse_info **);
extern void store_parm_decls (void);
extern void store_parm_decls_from (struct c_arg_info *);
extern void temp_store_parm_decls (tree, tree);
extern void temp_pop_parm_decls (void);
extern tree xref_tag (enum tree_code, tree);
extern struct c_typespec parser_xref_tag (location_t, enum tree_code, tree,
bool, tree);
extern struct c_parm *build_c_parm (struct c_declspecs *, tree,
struct c_declarator *, location_t);
extern struct c_declarator *build_attrs_declarator (tree,
struct c_declarator *);
extern struct c_declarator *build_function_declarator (struct c_arg_info *,
struct c_declarator *);
extern struct c_declarator *build_id_declarator (tree);
extern struct c_declarator *make_pointer_declarator (struct c_declspecs *,
struct c_declarator *);
extern struct c_declspecs *build_null_declspecs (void);
extern struct c_declspecs *declspecs_add_qual (location_t,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_type (location_t,
struct c_declspecs *,
struct c_typespec);
extern struct c_declspecs *declspecs_add_scspec (location_t,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_attrs (location_t,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_addrspace (location_t,
struct c_declspecs *,
addr_space_t);
extern struct c_declspecs *declspecs_add_alignas (location_t,
struct c_declspecs *, tree);
extern struct c_declspecs *finish_declspecs (struct c_declspecs *);
/* in c-objc-common.c */
extern bool c_objc_common_init (void);
extern bool c_missing_noreturn_ok_p (tree);
extern bool c_warn_unused_global_decl (const_tree);
extern void c_initialize_diagnostics (diagnostic_context *);
extern bool c_vla_unspec_p (tree x, tree fn);
extern alias_set_type c_get_alias_set (tree);
/* in c-typeck.c */
extern int in_alignof;
extern int in_sizeof;
extern int in_typeof;
extern tree c_last_sizeof_arg;
extern location_t c_last_sizeof_loc;
extern struct c_switch *c_switch_stack;
extern tree c_objc_common_truthvalue_conversion (location_t, tree);
extern tree require_complete_type (location_t, tree);
extern bool same_translation_unit_p (const_tree, const_tree);
extern int comptypes (tree, tree);
extern int comptypes_check_different_types (tree, tree, bool *);
extern bool c_vla_type_p (const_tree);
extern bool c_mark_addressable (tree, bool = false);
extern void c_incomplete_type_error (location_t, const_tree, const_tree);
extern tree c_type_promotes_to (tree);
extern struct c_expr default_function_array_conversion (location_t,
struct c_expr);
extern struct c_expr default_function_array_read_conversion (location_t,
struct c_expr);
extern struct c_expr convert_lvalue_to_rvalue (location_t, struct c_expr,
bool, bool);
extern tree decl_constant_value_1 (tree, bool);
extern void mark_exp_read (tree);
extern tree composite_type (tree, tree);
extern tree build_component_ref (location_t, tree, tree, location_t);
extern tree build_array_ref (location_t, tree, tree);
extern tree build_external_ref (location_t, tree, bool, tree *);
extern void pop_maybe_used (bool);
extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr);
extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *);
extern struct c_expr parser_build_unary_op (location_t, enum tree_code,
struct c_expr);
extern struct c_expr parser_build_binary_op (location_t,
enum tree_code, struct c_expr,
struct c_expr);
extern tree build_conditional_expr (location_t, tree, bool, tree, tree,
location_t, tree, tree, location_t);
extern tree build_compound_expr (location_t, tree, tree);
extern tree c_cast_expr (location_t, struct c_type_name *, tree);
extern tree build_c_cast (location_t, tree, tree);
extern void store_init_value (location_t, tree, tree, tree);
extern void maybe_warn_string_init (location_t, tree, struct c_expr);
extern void start_init (tree, tree, int, rich_location *);
extern void finish_init (void);
extern void really_start_incremental_init (tree);
extern void finish_implicit_inits (location_t, struct obstack *);
extern void push_init_level (location_t, int, struct obstack *);
extern struct c_expr pop_init_level (location_t, int, struct obstack *,
location_t);
extern void set_init_index (location_t, tree, tree, struct obstack *);
extern void set_init_label (location_t, tree, location_t, struct obstack *);
extern void process_init_element (location_t, struct c_expr, bool,
struct obstack *);
extern tree build_compound_literal (location_t, tree, tree, bool,
unsigned int);
extern void check_compound_literal_type (location_t, struct c_type_name *);
extern tree c_start_case (location_t, location_t, tree, bool);
extern void c_finish_case (tree, tree);
extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool,
bool);
extern tree build_asm_stmt (bool, tree);
extern int c_types_compatible_p (tree, tree);
extern tree c_begin_compound_stmt (bool);
extern tree c_end_compound_stmt (location_t, tree, bool);
extern void c_finish_if_stmt (location_t, tree, tree, tree);
extern void c_finish_loop (location_t, location_t, tree, location_t, tree,
tree, tree, tree, bool);
extern tree c_begin_stmt_expr (void);
extern tree c_finish_stmt_expr (location_t, tree);
extern tree c_process_expr_stmt (location_t, tree);
extern tree c_finish_expr_stmt (location_t, tree);
extern tree c_finish_return (location_t, tree, tree);
extern tree c_finish_bc_stmt (location_t, tree *, bool);
extern tree c_finish_goto_label (location_t, tree);
extern tree c_finish_goto_ptr (location_t, tree);
extern tree c_expr_to_decl (tree, bool *, bool *);
extern tree c_finish_omp_construct (location_t, enum tree_code, tree, tree);
extern tree c_finish_oacc_data (location_t, tree, tree);
extern tree c_finish_oacc_host_data (location_t, tree, tree);
extern tree c_begin_omp_parallel (void);
extern tree c_finish_omp_parallel (location_t, tree, tree);
extern tree c_begin_omp_task (void);
extern tree c_finish_omp_task (location_t, tree, tree);
extern void c_finish_omp_cancel (location_t, tree);
extern void c_finish_omp_cancellation_point (location_t, tree);
extern tree c_finish_omp_clauses (tree, enum c_omp_region_type);
extern tree c_build_va_arg (location_t, tree, location_t, tree);
extern tree c_finish_transaction (location_t, tree, int);
extern bool c_tree_equal (tree, tree);
extern tree c_build_function_call_vec (location_t, vec<location_t>, tree,
vec<tree, va_gc> *, vec<tree, va_gc> *);
extern tree c_omp_clause_copy_ctor (tree, tree, tree);
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
extern int current_function_returns_value;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
extern int current_function_returns_null;
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
extern int current_function_returns_abnormally;
/* In c-decl.c */
/* Tell the binding oracle what kind of binding we are looking for. */
enum c_oracle_request
{
/* A symbol binding. */
C_ORACLE_SYMBOL,
/* A tag binding. */
C_ORACLE_TAG,
/* A label binding. */
C_ORACLE_LABEL
};
/* If this is non-NULL, then it is a "binding oracle" which can lazily
create bindings when needed by the C compiler. The oracle is told
the name and type of the binding to create. It can call pushdecl
or the like to ensure the binding is visible; or do nothing,
leaving the binding untouched. c-decl.c takes note of when the
oracle has been called and will not call it again if it fails to
create a given binding. */
typedef void c_binding_oracle_function (enum c_oracle_request, tree identifier);
extern c_binding_oracle_function *c_binding_oracle;
extern void c_finish_incomplete_decl (tree);
extern tree c_omp_reduction_id (enum tree_code, tree);
extern tree c_omp_reduction_decl (tree);
extern tree c_omp_reduction_lookup (tree, tree);
extern tree c_check_omp_declare_reduction_r (tree *, int *, void *);
extern bool c_check_in_current_scope (tree);
extern void c_pushtag (location_t, tree, tree);
extern void c_bind (location_t, tree, bool);
extern bool tag_exists_p (enum tree_code, tree);
/* In c-errors.c */
extern bool pedwarn_c90 (location_t, int opt, const char *, ...)
ATTRIBUTE_GCC_DIAG(3,4);
extern bool pedwarn_c99 (location_t, int opt, const char *, ...)
ATTRIBUTE_GCC_DIAG(3,4);
extern bool pedwarn_c11 (location_t, int opt, const char *, ...)
ATTRIBUTE_GCC_DIAG(3,4);
extern void
set_c_expr_source_range (c_expr *expr,
location_t start, location_t finish);
extern void
set_c_expr_source_range (c_expr *expr,
source_range src_range);
/* In c-fold.c */
extern vec<tree> incomplete_record_decls;
#if CHECKING_P
namespace selftest {
extern void run_c_tests (void);
} // namespace selftest
#endif /* #if CHECKING_P */
#endif /* ! GCC_C_TREE_H */
|
GB_reduce_panel.c | //------------------------------------------------------------------------------
// GB_reduce_panel: s=reduce(A), reduce a matrix to a scalar
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Reduce a matrix to a scalar using a panel-based method for built-in
// operators. No typecasting is performed.
// NOTE(review): this is a template body meant to be #include'd into a
// generated reduction kernel -- it does not compile standalone.  GB_ATYPE,
// GB_CTYPE, GB_PANEL and the other GB_* macros, as well as A, s, W_space,
// nthreads and ntasks, must all be supplied by the including file.
{
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
const GB_ATYPE *GB_RESTRICT Ax = (GB_ATYPE *) A->x ;
int64_t anz = GB_NNZ (A) ;
ASSERT (anz > 0) ;
#if GB_IS_ANY_MONOID
// the ANY monoid can take any entry, and terminate immediately
s = Ax [anz-1] ;
#else
//--------------------------------------------------------------------------
// typecast workspace
//--------------------------------------------------------------------------
// ctype W [ntasks] ; one partial result per task, provided by the caller
// via W_space
GB_CTYPE *GB_RESTRICT W = (GB_CTYPE *) W_space ;
//--------------------------------------------------------------------------
// reduce A to a scalar
//--------------------------------------------------------------------------
if (nthreads == 1)
{
//----------------------------------------------------------------------
// load the Panel with the first entries
//----------------------------------------------------------------------
// The panel holds GB_PANEL independent partial sums, folded into the
// scalar only at the end (presumably so the loop can vectorize --
// TODO confirm against the generator).
GB_ATYPE Panel [GB_PANEL] ;
int64_t first_panel_size = GB_IMIN (GB_PANEL, anz) ;
for (int64_t k = 0 ; k < first_panel_size ; k++)
{
Panel [k] = Ax [k] ;
}
#if GB_HAS_TERMINAL
int panel_count = 0 ;
#endif
//----------------------------------------------------------------------
// reduce all entries to the Panel
//----------------------------------------------------------------------
for (int64_t p = GB_PANEL ; p < anz ; p += GB_PANEL)
{
if (p + GB_PANEL > anz)
{
// last partial panel
for (int64_t k = 0 ; k < anz-p ; k++)
{
// Panel [k] = op (Panel [k], Ax [p+k]) ;
GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
}
}
else
{
// full panel
for (int64_t k = 0 ; k < GB_PANEL ; k++)
{
// Panel [k] = op (Panel [k], Ax [p+k]) ;
GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
}
#if GB_HAS_TERMINAL
panel_count-- ;
if (panel_count <= 0)
{
// check for early exit only every 256 panels
panel_count = 256 ;
int count = 0 ;
for (int64_t k = 0 ; k < GB_PANEL ; k++)
{
count += (Panel [k] == GB_TERMINAL_VALUE) ;
}
if (count > 0)
{
// at least one partial sum hit the monoid's terminal
// value; the remaining entries cannot change the result
break ;
}
}
#endif
}
}
//----------------------------------------------------------------------
// s = reduce (Panel)
//----------------------------------------------------------------------
s = Panel [0] ;
for (int64_t k = 1 ; k < first_panel_size ; k++)
{
// s = op (s, Panel [k]) ;
GB_ADD_ARRAY_TO_SCALAR (s, Panel, k) ;
}
}
else
{
//----------------------------------------------------------------------
// all tasks share a single early_exit flag
//----------------------------------------------------------------------
// If this flag gets set, all tasks can terminate early
#if GB_HAS_TERMINAL
bool early_exit = false ;
#endif
//----------------------------------------------------------------------
// each thread reduces its own slice in parallel
//----------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < ntasks ; tid++)
{
//------------------------------------------------------------------
// determine the work for this task
//------------------------------------------------------------------
// Task tid reduces Ax [pstart:pend-1] to the scalar W [tid]
int64_t pstart, pend ;
GB_PARTITION (pstart, pend, anz, tid, ntasks) ;
GB_ATYPE t = Ax [pstart] ;
//------------------------------------------------------------------
// skip this task if the terminal value has already been reached
//------------------------------------------------------------------
#if GB_HAS_TERMINAL
// check if another task has called for an early exit
// (GB_ATOMIC_READ presumably expands to an atomic-read pragma --
// TODO confirm; early_exit is shared across all tasks)
bool my_exit ;
GB_ATOMIC_READ
my_exit = early_exit ;
if (!my_exit)
#endif
//------------------------------------------------------------------
// do the reductions for this task
//------------------------------------------------------------------
{
//--------------------------------------------------------------
// load the Panel with the first entries
//--------------------------------------------------------------
GB_ATYPE Panel [GB_PANEL] ;
int64_t my_anz = pend - pstart ;
int64_t first_panel_size = GB_IMIN (GB_PANEL, my_anz) ;
for (int64_t k = 0 ; k < first_panel_size ; k++)
{
Panel [k] = Ax [pstart + k] ;
}
#if GB_HAS_TERMINAL
int panel_count = 0 ;
#endif
//--------------------------------------------------------------
// reduce all entries to the Panel
//--------------------------------------------------------------
// same panel strategy as the single-threaded case above, applied
// to this task's slice only
for (int64_t p = pstart + GB_PANEL ; p < pend ; p += GB_PANEL)
{
if (p + GB_PANEL > pend)
{
// last partial panel
for (int64_t k = 0 ; k < pend-p ; k++)
{
// Panel [k] = op (Panel [k], Ax [p+k]) ;
GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
}
}
else
{
// full panel
for (int64_t k = 0 ; k < GB_PANEL ; k++)
{
// Panel [k] = op (Panel [k], Ax [p+k]) ;
GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
}
#if GB_HAS_TERMINAL
panel_count-- ;
if (panel_count <= 0)
{
// check for early exit only every 256 panels
panel_count = 256 ;
int count = 0 ;
for (int64_t k = 0 ; k < GB_PANEL ; k++)
{
count += (Panel [k] == GB_TERMINAL_VALUE) ;
}
if (count > 0)
{
break ;
}
}
#endif
}
}
//--------------------------------------------------------------
// t = reduce (Panel)
//--------------------------------------------------------------
t = Panel [0] ;
for (int64_t k = 1 ; k < first_panel_size ; k++)
{
// t = op (t, Panel [k]) ;
GB_ADD_ARRAY_TO_SCALAR (t, Panel, k) ;
}
#if GB_HAS_TERMINAL
if (t == GB_TERMINAL_VALUE)
{
// tell all other tasks to exit early
GB_ATOMIC_WRITE
early_exit = true ;
}
#endif
}
//------------------------------------------------------------------
// save the results of this task
//------------------------------------------------------------------
W [tid] = t ;
}
//----------------------------------------------------------------------
// sum up the results of each slice using a single thread
//----------------------------------------------------------------------
s = W [0] ;
for (int tid = 1 ; tid < ntasks ; tid++)
{
// s = op (s, W [tid]), no typecast
GB_ADD_ARRAY_TO_SCALAR (s, W, tid) ;
}
}
#endif
}
|
1.hello.c | #include <stdio.h>
#include <omp.h>
/* Q1: How many times will you see the "Hello world!" */
/* message if the program is executed with "./1.hello"? */
/* Q2: Without changing the program, how to make it to */
/* print 4 times the "Hello World!" message? */
int main ()
{
  /* Every thread in the team created by the parallel region executes the
   * structured block once, so the message is printed once per thread
   * (answering Q2: set OMP_NUM_THREADS=4 before running ./1.hello). */
  #pragma omp parallel
  {
    printf("Hello world!\n");
  }
  return 0;
}
|
ck.c | /*
Maximilien Danisch and Qinna Wang
January 2015
http://bit.ly/maxdan94
maximilien.danisch@telecom-paristech.fr
Info:
Feel free to use these lines as you wish. This program enumerate all k-cliques.
To compile:
"gcc ck.c -O9 -o ck -fopenmp".
To execute:
"./ck p k edgelist.txt".
p is the number of processors to use.
k of k-clique to enumerate.
"edgelist.txt" should contain the graph: one edge on each line separated by a space.
Will print the number of l-cliques for l in [1,k].
Note:
parallelisation over edges and increasing core ordering
*/
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <time.h>
#define NLINKS 100000000 //maximum number of edges for memory allocation, will increase if needed
#define PAD sizeof(int)*16 //use a padding space of 64 bytes after private heap arrays in order to avoid false sharing among threads.
// heap data structure :
typedef struct {
unsigned key;
unsigned value;
} keyvalue;
typedef struct {
unsigned n_max; // max number of nodes.
unsigned n; // number of nodes.
unsigned *pt; // pointers to nodes.
keyvalue *kv; // nodes.
} bheap;
//allocate a binary min-heap able to hold up to n_max (key,value) pairs
bheap *construct(unsigned n_max){
	bheap *heap=malloc(sizeof(bheap));
	heap->n_max=n_max;
	heap->n=0;
	heap->kv=malloc(n_max*sizeof(keyvalue));
	heap->pt=malloc(n_max*sizeof(unsigned));
	for (unsigned u=0;u<n_max;u++){
		heap->pt[u]=-1;//-1 (i.e. UINT_MAX) marks "key not in the heap"
	}
	return heap;
}
//exchange heap entries i and j, keeping the key -> position index pt in sync
inline void swap(bheap *heap,unsigned i, unsigned j) {
	keyvalue kv_tmp=heap->kv[i];
	unsigned pt_tmp=heap->pt[kv_tmp.key];//old position of the key stored at i
	heap->pt[heap->kv[i].key]=heap->pt[heap->kv[j].key];//key from slot i now lives at j's position
	heap->kv[i]=heap->kv[j];
	heap->pt[heap->kv[j].key]=pt_tmp;//key from slot j (kv[j] not yet overwritten) now lives at i's old position
	heap->kv[j]=kv_tmp;
}
//move the entry at position i towards the root until the min-heap property holds
inline void bubble_up(bheap *heap,unsigned i) {
	while (i>0) {
		unsigned parent=(i-1)/2;
		//stop as soon as the parent is not strictly larger
		if (heap->kv[parent].value<=heap->kv[i].value) break;
		swap(heap,i,parent);
		i=parent;
	}
}
//move the root down until the min-heap property is restored
inline void bubble_down(bheap *heap) {
	unsigned i=0,j1=1,j2=2,j;
	while (j1<heap->n) {
		//j = child with the smaller value (left child if the right one is missing)
		j=( (j2<heap->n) && (heap->kv[j2].value<heap->kv[j1].value) ) ? j2 : j1 ;
		if (heap->kv[j].value < heap->kv[i].value) {
			swap(heap,i,j);
			i=j;
			j1=2*i+1;
			j2=j1+1;
			continue;
		}
		break;
	}
}
//append a (key,value) pair at the bottom of the heap and restore the heap property
inline void insert(bheap *heap,keyvalue kv){
	unsigned pos=heap->n++;
	heap->pt[kv.key]=pos;
	heap->kv[pos]=kv;
	bubble_up(heap,pos);
}
//decrement the value (remaining degree) of node 'key' if it is still in the heap
inline void update(bheap *heap,unsigned key){
	unsigned i=heap->pt[key];
	if (i!=-1){//-1 (UINT_MAX) means the node was already popped
		((heap->kv[i]).value)--;
		bubble_up(heap,i);//value only decreases, so the entry can only move up
	}
}
//remove and return the entry with the minimum value
inline keyvalue popmin(bheap *heap){
	keyvalue min=heap->kv[0];
	heap->pt[min.key]=-1;//mark the popped key as no longer in the heap
	heap->kv[0]=heap->kv[--(heap->n)];//move the last entry to the root
	heap->pt[heap->kv[0].key]=0;
	bubble_down(heap);
	return min;
}
// graph datastructure:
typedef struct {
unsigned s;
unsigned t;
} edge;
typedef struct {
//edge list structure:
unsigned n; //number of nodes
unsigned e; //number of edges
unsigned n2; //number of nodes with core value larger than one
unsigned e2; //number of edges between nodes with core value larger than one
edge *edges;//list of edges
//to compute a degeneracy ordering:
unsigned *d0; //degrees
unsigned *cd0; //cumulative degree: (start with 0) length=dim+1
unsigned *adj0; //list of neighbors
unsigned *rank; //degeneracy rankings of nodes
unsigned core; //core number of the graph
//truncated neighborhoods:
unsigned *d; //truncated degrees
unsigned *cd; //cumulative degree: (start with 0) length=dim+1
unsigned *adj; //list of neighbors with higher rank
} sparse;
//compute the maximum of three unsigned
inline unsigned max3(unsigned a,unsigned b,unsigned c){
a=(a>b) ? a : b;
return (a>c) ? a : c;
}
//reading the edgelist from file
//reading the edgelist from file; aborts with a message if the file cannot be opened
sparse* readedgelist(char* edgelist){
	unsigned e1=NLINKS;//current capacity of g->edges
	sparse *g=malloc(sizeof(sparse));
	FILE *file;
	g->n=0;
	g->e=0;
	file=fopen(edgelist,"r");
	if (file==NULL){//fail loudly instead of crashing inside fscanf(NULL,...)
		fprintf(stderr,"Could not open file %s\n",edgelist);
		exit(EXIT_FAILURE);
	}
	g->edges=malloc(e1*sizeof(edge));
	while (fscanf(file,"%u %u", &(g->edges[g->e].s), &(g->edges[g->e].t))==2) {
		g->n=max3(g->n,g->edges[g->e].s,g->edges[g->e].t);
		//grow BEFORE the array is full: the previous post-increment test
		//(g->e++==e1) let fscanf write one edge past the end of the buffer
		//before the reallocation happened
		if (++(g->e)==e1) {
			e1+=NLINKS;
			g->edges=realloc(g->edges,e1*sizeof(edge));
		}
	}
	fclose(file);
	g->n++;//node ids start at 0, so the number of nodes is max id + 1
	g->edges=realloc(g->edges,g->e*sizeof(edge));//shrink to fit
	return g;
}
//Building the graph structure
//Building the graph structure: CSR-like adjacency lists (adj0/cd0/d0) from the edge list
void mkgraph(sparse *g){
	unsigned i;
	//degree of each node
	g->d0=calloc(g->n,sizeof(unsigned));
	for (i=0;i<g->e;i++) {
		g->d0[g->edges[i].s]++;
		g->d0[g->edges[i].t]++;
	}
	//cumulative degrees; d0 is reset to 0 so it can be reused as a fill cursor below
	g->cd0=malloc((g->n+1)*sizeof(unsigned));
	g->cd0[0]=0;
	for (i=1;i<g->n+1;i++) {
		g->cd0[i]=g->cd0[i-1]+g->d0[i-1];
		g->d0[i-1]=0;
	}
	//adjacency lists: each edge is stored in both directions, hence 2*e slots
	g->adj0=malloc(2*g->e*sizeof(unsigned));
	for (i=0;i<g->e;i++) {
		g->adj0[ g->cd0[g->edges[i].s] + g->d0[ g->edges[i].s ]++ ]=g->edges[i].t;
		g->adj0[ g->cd0[g->edges[i].t] + g->d0[ g->edges[i].t ]++ ]=g->edges[i].s;
	}
}
//Building the heap structure with (key,value)=(node,degree) for each node
//Building the heap structure with (key,value)=(node,degree) for each node
bheap* mkheap(sparse *g){
	bheap* heap=construct(g->n);
	for (unsigned node=0;node<g->n;node++){
		keyvalue kv;
		kv.key=node;
		kv.value=g->d0[node];
		insert(heap,kv);
	}
	return heap;
}
//release a heap together with its internal arrays
void freeheap(bheap *heap){
	free(heap->kv);
	free(heap->pt);
	free(heap);
}
//computing degeneracy ordering and core value
//computing degeneracy ordering and core value
//(repeatedly pops the node of minimum remaining degree; also discards nodes
//with core value < 2, which cannot belong to any clique of size >= 3)
void kcore(sparse* g){
	unsigned i,j,r=0,n=g->n;
	keyvalue kv;
	unsigned c=0;//the core number
	bheap *heap=mkheap(g);
	g->rank=malloc(g->n*sizeof(unsigned));
	for (i=0;i<g->n;i++){
		kv=popmin(heap);
		if (kv.value>c){
			c=kv.value;//core number = max over the popped remaining degrees
		}
		if (c<2){//remove node with core value less than 2
			g->rank[kv.key]=-1;//-1 (UINT_MAX) marks a discarded node
			n--;
		}
		else{
			g->rank[kv.key]=n-(++r);//nodes popped later get smaller ranks
		}
		//decrement the remaining degree of every neighbor still in the heap
		for (j=g->cd0[kv.key];j<g->cd0[kv.key+1];j++){
			update(heap,g->adj0[j]);
		}
	}
	freeheap(heap);
	//the full adjacency structure is no longer needed past this point
	free(g->d0);
	free(g->cd0);
	free(g->adj0);
	g->core=c;
	g->n2=n;//number of nodes with core value larger than one
}
//relabel nodes with their degeneracy rank, keeping only edges between kept
//nodes; each kept edge is stored with s holding the larger of the two ranks
void relabelnodes(sparse *g) {
	unsigned i,j,source,target;
	j=0;
	for (i=0;i<g->e;i++) {
		source=g->rank[g->edges[i].s];
		target=g->rank[g->edges[i].t];
		if (source==-1 || target==-1){//drop edges touching a discarded node
			continue;
		}
		if (source<target) {
			g->edges[j].s=target;
			g->edges[j++].t=source;
		}
		else {
			g->edges[j].s=source;
			g->edges[j++].t=target;
		}
	}
	g->e2=j;//number of edges between nodes with core value larger than one
	g->edges=realloc(g->edges,g->e2*sizeof(edge));
}
//for future use in qsort
//qsort comparator for unsigned, ascending order.
//Returns 0 for equal elements: the previous version returned -1 in that case,
//which is not a valid total ordering for qsort (C11 7.22.5) and is undefined
//behavior in principle, even though it usually "works".
int cmpfunc (const void * a, const void * b){
	unsigned x=*(const unsigned*)a;
	unsigned y=*(const unsigned*)b;
	return (x>y)-(x<y);
}
//Building the special graph structure
//Building the special graph structure: for each kept node, the sorted list of
//the targets of its oriented edges (used by merging()/merging2() which
//require sorted input)
void mkspecial(sparse *g){
	unsigned i;
	//truncated out-degrees (each oriented edge counts for its source only)
	g->d=calloc(g->n2,sizeof(unsigned));
	for (i=0;i<g->e2;i++) {
		g->d[g->edges[i].s]++;
	}
	//cumulative degrees; d is reset to 0 to be reused as a fill cursor below
	g->cd=malloc((g->n2+1)*sizeof(unsigned));
	g->cd[0]=0;
	for (i=1;i<g->n2+1;i++) {
		g->cd[i]=g->cd[i-1]+g->d[i-1];
		g->d[i-1]=0;
	}
	//each oriented edge is stored exactly once
	g->adj=malloc((g->e2)*sizeof(unsigned));
	for (i=0;i<g->e2;i++) {
		g->adj[g->cd[g->edges[i].s] + g->d[ g->edges[i].s ]++ ]=g->edges[i].t;
	}
	//sort every adjacency list (lists are independent, so this parallelizes trivially)
	#pragma omp parallel for private(i)
	for (i=0;i<g->n2;i++) {
		qsort(&g->adj[g->cd[i]],g->d[i],sizeof(unsigned),cmpfunc);
	}
	//free(g->edges); Can be freed if node parallelisation is used instead of edge
}
//release the graph and every array it owns
void freesparse(sparse *g){
	free(g->adj);
	free(g->cd);
	free(g->d);
	free(g->rank);
	free(g->edges);
	free(g);
}
//store the intersection of list1 and list2 in list3 and return the size of list3 (the 3 lists are sorted)
inline unsigned merging(unsigned *list1, unsigned s1, unsigned *list2, unsigned s2,unsigned *list3){
unsigned i=0,j=0,s3=0;
unsigned x=list1[0],y=list2[0];
while (i<s1 && j<s2){
if(x<y){
x=list1[++i];
}
else if(y<x){
y=list2[++j];
}
else{
list3[s3++]=x;
y=list2[++j];
x=list1[++i];
}
}
return s3;
}
//return the size of the intersection of list1 and list2
inline unsigned merging2(unsigned *list1, unsigned s1, unsigned *list2, unsigned s2){
unsigned i=0,j=0,s3=0;
unsigned x=list1[0],y=list2[0];
while (i<s1 && j<s2){
if(x<y){
x=list1[++i];
}
else if(y<x){
y=list2[++j];
}
else{
s3++;
y=list2[++j];
x=list1[++i];
}
}
return s3;
}
//the recursion to compute all possible intersections
//the recursion to compute all possible intersections.
//merge holds one candidate list per recursion level (g->core entries each);
//size[k-3] is the length of the level-k candidate list; nck accumulates the
//number of k-cliques found at each level.
void recursion(unsigned kmax, unsigned k, unsigned* merge, unsigned* size, sparse* g, unsigned long long* nck){
	unsigned t=(k-3)*g->core,t2=t+g->core;//offsets of the current and next level lists in merge
	unsigned i, u;
	if (k==kmax-1){//last level: only the size of each intersection is needed
		for(i=1; i<size[k-3]; i++){//Astuce: when i=0; no adjacent node in merge;
			u=merge[t+i];
			size[k-2]=merging2(&merge[t],i,&g->adj[g->cd[u]],g->d[u]);
			nck[k]+=size[k-2];
		}
		return;
	}
	for(i=1; i<size[k-3]; i++){
		u=merge[t+i];
		//intersect u's adjacency list with the current candidate list, storing
		//the result as the next level's candidate list
		size[k-2]=merging(&g->adj[g->cd[u]],g->d[u],&merge[t],i,&merge[t2]);
		nck[k]+=size[k-2];
		recursion(kmax, k+1, merge, size, g, nck);
	}
}
//one pass over all k-cliques
//one pass over all k-cliques: returns an array nck where nck[l-1] is the
//number of l-cliques, for l in [1,kmax].
//Fix: nck[0] and nck[1] are now only written when they exist -- the previous
//version wrote nck[1] (and nck[0]) unconditionally, an out-of-bounds write
//when kmax < 2.
unsigned long long *onepass(sparse *g,unsigned kmax){
	unsigned e,u,v,k;
	unsigned *merge,*size;
	unsigned long long *nck=calloc(kmax,sizeof(unsigned long long)),*nck_p;
	if (kmax>0) nck[0]=g->n;//1-cliques are the nodes
	if (kmax>1) nck[1]=g->e;//2-cliques are the edges
	if (kmax>2){
		#pragma omp parallel private(merge,size,nck_p,e,u,v,k)
		{
			//per-thread buffers, padded with PAD bytes to avoid false sharing.
			//NOTE(review): calloc multiplies its arguments, so the PAD below is
			//added to EACH element rather than once at the end as in the
			//mallocs; this over-allocates but is otherwise harmless.
			size=malloc((kmax-2)*sizeof(unsigned)+PAD);
			nck_p=calloc(kmax,sizeof(unsigned long long)+PAD);
			if (kmax==3){//triangles: one intersection size per edge, no recursion
				#pragma omp for schedule(dynamic, 1) nowait
				for(e=0; e<g->e2; e++){
					u=g->edges[e].s;
					v=g->edges[e].t;
					size[0]=merging2(&(g->adj[g->cd[u]]),g->d[u],&(g->adj[g->cd[v]]),g->d[v]);
					nck_p[2]+=size[0];
				}
			}
			else{
				//one candidate list of up to g->core entries per recursion level
				merge=malloc((kmax-2)*g->core*sizeof(unsigned)+PAD);
				#pragma omp for schedule(dynamic, 1) nowait
				for(e=0; e<g->e2; e++){
					u=g->edges[e].s;
					v=g->edges[e].t;
					size[0]=merging(&(g->adj[g->cd[u]]),g->d[u],&(g->adj[g->cd[v]]),g->d[v],merge);
					nck_p[2]+=size[0];
					recursion(kmax,3,merge,size,g,nck_p);
				}
				free(merge);
			}
			free(size);
			//merge the per-thread counters into the shared result
			#pragma omp critical
			{
				for (k=2;k<kmax;k++){
					nck[k]+=nck_p[k];
				}
				free(nck_p);
			}
		}
	}
	return nck;
}
//usage: ./ck p k edgelist.txt
//  p: number of OpenMP threads, k: clique size, edgelist.txt: one edge per line
int main(int argc,char** argv){
	sparse* g;
	unsigned i,kmax;
	unsigned long long *nck;
	time_t t0,t1,t2;
	//validate the command line before touching argv[1..3]: the previous
	//version dereferenced missing arguments and crashed on bad usage
	if (argc<4){
		fprintf(stderr,"Usage: %s p k edgelist.txt\n",argv[0]);
		return 1;
	}
	kmax=atoi(argv[2]);
	omp_set_num_threads(atoi(argv[1]));
	t1=time(NULL);
	t0=t1;
	printf("Reading edgelist from file %s\n",argv[3]);
	g=readedgelist(argv[3]);
	t2=time(NULL);
	printf("- Time = %ldh%ldm%lds\n",(t2-t1)/3600,((t2-t1)%3600)/60,((t2-t1)%60));
	t1=t2;
	printf("Number of nodes: %u\n",g->n);
	printf("Number of edges: %u\n",g->e);
	printf("Building the graph structure\n");
	mkgraph(g);
	printf("Computing degeneracy ordering\n");
	kcore(g);
	relabelnodes(g);
	printf("Number of nodes (with core value > 1): %u\n",g->n2);
	printf("Number of edges (between nodes with core value > 1): %u\n",g->e2);
	printf("Core number = %u\n",g->core);
	mkspecial(g);
	t2=time(NULL);
	printf("- Time = %ldh%ldm%lds\n",(t2-t1)/3600,((t2-t1)%3600)/60,((t2-t1)%60));
	t1=t2;
	printf("Enumerating all %u-cliques\n",kmax);
	nck=onepass(g,kmax);
	for(i=0;i<kmax;i++){
		printf("Number of %u-cliques: %llu\n",i+1,nck[i]);
	}
	free(nck);//was leaked before
	freesparse(g);
	t2=time(NULL);
	printf("- Time = %ldh%ldm%lds\n",(t2-t1)/3600,((t2-t1)%3600)/60,((t2-t1)%60));
	printf("- Overall time = %ldh%ldm%lds\n",(t2-t0)/3600,((t2-t0)%3600)/60,((t2-t0)%60));
	return 0;
}
|
matrix.c | #include <stdlib.h>
#include "matrix.h"
#define MAT_THREADS 8
/* Allocate the N rows of an N x N matrix; mat itself (the array of N row
 * pointers) must already be allocated by the caller. */
void matAlloc(double **mat) {
    #pragma omp parallel for num_threads(MAT_THREADS) shared(mat)
    for (int row = 0; row < N; ++row) {
        mat[row] = malloc(sizeof(double) * N);
    }
}
/* Fill every entry of the N x N matrix with a pseudo-random value.
 * NOTE(review): rand() is not required to be thread-safe (C11 7.22.2.1), so
 * calling it from multiple OpenMP threads is a potential data race; consider
 * rand_r() with per-thread seeds if reproducibility/safety matters. */
void matFill(double **mat) {
    #pragma omp parallel for num_threads(MAT_THREADS) shared(mat)
    for (int i = 0; i < N; ++i) {
        for (int j = 0; j < N; ++j) {
            mat[i][j] = (double)rand();
        }
    }
}
/* Release each row of the matrix in parallel, then the row-pointer array. */
void matFree(double **mat) {
    #pragma omp parallel for num_threads(MAT_THREADS) shared(mat)
    for (int row = 0; row < N; ++row) {
        free(mat[row]);
    }
    free(mat);
}
/* Return a newly allocated transpose of the N x N matrix mat. */
double **matTrans(double **mat) {
    double **result = malloc(N * sizeof(double *));
    matAlloc(result);
    #pragma omp parallel for num_threads(MAT_THREADS) shared(mat, result)
    for (int i = 0; i < N; ++i) {
        for (int j = 0; j < N; ++j) {
            result[i][j] = mat[j][i];
        }
    }
    return result;
} |
GB_unaryop__lnot_fp32_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp32_uint64
// op(A') function: GB_tran__lnot_fp32_uint64
// C type: float
// A type: uint64_t
// cast: float cij = (float) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_fp32_uint64      // Cx = op (cast (Ax))
(
    float *restrict Cx,                 // output array
    const uint64_t *restrict Ax,        // input array
    int64_t anz,                        // number of entries in Ax and Cx
    int nthreads                        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled via GB_control.h
    return (GrB_NO_VALUE) ;
    #else
    // apply Cx [p] = !(((float) Ax [p]) != 0) to every entry, in parallel
    // (GB_CAST_OP expands to GB_GETA, GB_CASTING, then GB_OP, defined above)
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_fp32_uint64      // C = op (cast (A'))
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix
    int64_t **Rowcounts,                // per-slice row counts -- TODO confirm layout
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice                         // number of slices of A
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled via GB_control.h
    return (GrB_NO_VALUE) ;
    #else
    // the transpose/typecast/apply loop lives in the included template,
    // specialized through the GB_* macros defined earlier in this file
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
for_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 -verify %s -Wuninitialized
// Exercises -Wuninitialized inside an '#pragma omp for simd' region: 'x' is
// read in the loop body without ever being initialized.
void xxx(int argc) {
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp for simd
  for (int i = 0; i < 10; ++i)
    argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for simd'}}
#pragma omp for simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for simd'}}
#pragma omp for simd foo
// A bare '#pragma omp for simd' is valid, but must be followed by a for loop.
void test_no_clause() {
  int i;
#pragma omp for simd
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{statement after '#pragma omp for simd' must be a for loop}}
#pragma omp for simd
  ++i;
}
// Branching (goto/return) may not cross the boundary of an OpenMP region:
// jumps into or out of the 'for simd' loop must be diagnosed.
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;
  int x[24];
#pragma omp parallel
#pragma omp for simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }
  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}
// Unknown clauses after 'for simd' are ignored with a warning.
void test_invalid_clause() {
  int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd foo bar
  for (i = 0; i < 16; ++i)
    ;
}
// Stray punctuation after the directive or after a valid clause is ignored
// with a warning, not an error.
void test_non_identifiers() {
  int i, x;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd;
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd linear(x);
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd private(x);
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}
extern int foo();
// Parsing and semantic checks for the 'safelen' clause: malformed argument
// lists, non-constant expressions, and non-positive values.
void test_safelen() {
  int i;
  // expected-error@+1 {{expected '('}}
#pragma omp for simd safelen
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp for simd safelen()
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(,
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(, )
  for (i = 0; i < 16; ++i)
    ;
  // expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
  // expected-error@+1 {{expected '('}}
#pragma omp for simd safelen 4)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4,
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4 4)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{integer constant expression}}
#pragma omp for simd safelen(2.5)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{integer constant expression}}
#pragma omp for simd safelen(foo())
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp for simd safelen(-5)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp for simd safelen(0)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp for simd safelen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}
// Parsing and semantic checks for the 'simdlen' clause; mirrors test_safelen.
void test_simdlen() {
  int i;
  // expected-error@+1 {{expected '('}}
#pragma omp for simd simdlen
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp for simd simdlen()
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(,
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(, )
  for (i = 0; i < 16; ++i)
    ;
  // expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
  // expected-error@+1 {{expected '('}}
#pragma omp for simd simdlen 4)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4,
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4 4)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{integer constant expression}}
#pragma omp for simd simdlen(2.5)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{integer constant expression}}
#pragma omp for simd simdlen(foo())
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp for simd simdlen(-5)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp for simd simdlen(0)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp for simd simdlen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}
// 'simdlen' must not exceed 'safelen', regardless of clause order.
void test_safelen_simdlen() {
  int i;
  // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp for simd simdlen(6) safelen(5)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp for simd safelen(5) simdlen(6)
  for (i = 0; i < 16; ++i)
    ;
}
// Parsing and semantic checks for the 'collapse' clause: malformed argument
// lists, non-constant/non-positive values, the required number of nested
// loops, and data-sharing diagnostics on collapsed loop variables.
void test_collapse() {
  int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp for simd collapse
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd collapse(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd collapse()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd collapse(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd collapse(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp for simd collapse 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
#pragma omp for simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{integer constant expression}}
#pragma omp for simd collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{integer constant expression}}
#pragma omp for simd collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for simd collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for simd collapse(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for simd collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd collapse(2)
  for (i = 0; i < 16; ++i) // expected-note {{defined as lastprivate}}
  // expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for simd' directive into a parallel or another task region?}}
    for (int j = 0; j < 16; ++j)
// expected-error@+2 2 {{reduction variable must be shared}}
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp for simd reduction(+ : i, j)
      for (int k = 0; k < 16; ++k)
        i += j;
}
// Parsing and semantic checks for the 'linear' clause: malformed lists, step
// expressions, and conflicts with private/lastprivate/duplicate linear.
void test_linear() {
  int i;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected expression}}
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(,
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected expression}}
  // expected-error@+1 {{expected expression}}
#pragma omp for simd linear(, )
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp for simd linear()
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp for simd linear(int)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected variable name}}
#pragma omp for simd linear(0)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp for simd linear(x)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{use of undeclared identifier 'x'}}
  // expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp for simd linear(x, y)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+3 {{use of undeclared identifier 'x'}}
  // expected-error@+2 {{use of undeclared identifier 'y'}}
  // expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp for simd linear(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
  int x, y;
  // expected-error@+1 {{expected expression}}
#pragma omp for simd linear(x :)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(x :, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd linear(x : 1)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd linear(x : 2 * 2)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(x : 1, y)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(x : 1, y, z : 1)
  for (i = 0; i < 16; ++i)
    ;
  // expected-note@+2 {{defined as linear}}
  // expected-error@+1 {{linear variable cannot be linear}}
#pragma omp for simd linear(x) linear(x)
  for (i = 0; i < 16; ++i)
    ;
  // expected-note@+2 {{defined as private}}
  // expected-error@+1 {{private variable cannot be linear}}
#pragma omp for simd private(x) linear(x)
  for (i = 0; i < 16; ++i)
    ;
  // expected-note@+2 {{defined as linear}}
  // expected-error@+1 {{linear variable cannot be private}}
#pragma omp for simd linear(x) private(x)
  for (i = 0; i < 16; ++i)
    ;
  // expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}}
#pragma omp for simd linear(x, y : 0)
  for (i = 0; i < 16; ++i)
    ;
  // expected-note@+2 {{defined as linear}}
  // expected-error@+1 {{linear variable cannot be lastprivate}}
#pragma omp for simd linear(x) lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-note@+2 {{defined as lastprivate}}
  // expected-error@+1 {{lastprivate variable cannot be linear}}
#pragma omp for simd lastprivate(x) linear(x)
  for (i = 0; i < 16; ++i)
    ;
}
void test_aligned() {
  // Parse and semantic diagnostics for the 'aligned' clause of
  // '#pragma omp for simd'.  The expected-* annotations below are clang
  // -verify directives; their @+N offsets are relative to the directive's
  // own line, so do not insert lines between a directive and its target.
  int i;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected expression}}
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(,
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected expression}}
  // expected-error@+1 {{expected expression}}
#pragma omp for simd aligned(, )
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp for simd aligned()
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp for simd aligned(int)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected variable name}}
#pragma omp for simd aligned(0)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp for simd aligned(x)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{use of undeclared identifier 'x'}}
  // expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp for simd aligned(x, y)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+3 {{use of undeclared identifier 'x'}}
  // expected-error@+2 {{use of undeclared identifier 'y'}}
  // expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp for simd aligned(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
  int *x, y, z[25]; // expected-note 4 {{'y' defined here}}
#pragma omp for simd aligned(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd aligned(z)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp for simd aligned(x :)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(x :, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd aligned(x : 1)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd aligned(x : 2 * 2)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(x : 1, y)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(x : 1, y, z : 1)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp for simd aligned(x, y)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp for simd aligned(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
  // expected-note@+2 {{defined as aligned}}
  // expected-error@+1 {{a variable cannot appear in more than one aligned clause}}
#pragma omp for simd aligned(x) aligned(z, x)
  for (i = 0; i < 16; ++i)
    ;
  // expected-note@+3 {{defined as aligned}}
  // expected-error@+2 {{a variable cannot appear in more than one aligned clause}}
  // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp for simd aligned(x, y, z) aligned(y, z)
  for (i = 0; i < 16; ++i)
    ;
}
void test_private() {
  // Parse and semantic diagnostics for the 'private' clause of
  // '#pragma omp for simd' (clang -verify test; @+N offsets are relative).
  int i;
#pragma omp parallel
  // expected-error@+2 {{expected expression}}
  // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd private(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 2 {{expected expression}}
#pragma omp for simd private(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 2 {{expected expression}}
#pragma omp for simd private(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected expression}}
#pragma omp for simd private()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected expression}}
#pragma omp for simd private(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected variable name}}
#pragma omp for simd private(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp parallel
#pragma omp for simd private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}
void test_lastprivate() {
  // Parse and semantic diagnostics for the 'lastprivate' clause of
  // '#pragma omp for simd' (clang -verify test; @+N offsets are relative).
  int i;
#pragma omp parallel
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 {{expected expression}}
#pragma omp for simd lastprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 2 {{expected expression}}
#pragma omp for simd lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 2 {{expected expression}}
#pragma omp for simd lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected expression}}
#pragma omp for simd lastprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected expression}}
#pragma omp for simd lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected variable name}}
#pragma omp for simd lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp parallel
#pragma omp for simd lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
void test_firstprivate() {
  // Parse and semantic diagnostics for the 'firstprivate' clause of
  // '#pragma omp for simd', including valid combinations with
  // 'lastprivate' (clang -verify test; @+N offsets are relative).
  int i;
#pragma omp parallel
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 {{expected expression}}
#pragma omp for simd firstprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 2 {{expected expression}}
#pragma omp for simd firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 2 {{expected expression}}
#pragma omp for simd firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected expression}}
#pragma omp for simd firstprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected expression}}
#pragma omp for simd firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected variable name}}
#pragma omp for simd firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp parallel
#pragma omp for simd lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
void test_loop_messages() {
  // Diagnostics for invalid loop induction variables under
  // '#pragma omp for simd': float/double loop variables are rejected
  // (clang -verify test; @+N offsets are relative).
  float a[100], b[100], c[100];
#pragma omp parallel
  // expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for simd
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
#pragma omp parallel
  // expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for simd
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
}
void test_nontemporal() {
  // Parse and semantic diagnostics for the OpenMP 5.0 'nontemporal' and
  // 'order' clauses of '#pragma omp for simd'.  omp45-/omp50- prefixed
  // directives are version-gated; @+N offsets are relative (clang -verify).
  int i;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd nontemporal(
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd nontemporal(,
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 2 {{expected expression}}
#pragma omp for simd nontemporal(, )
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected expression}}
#pragma omp for simd nontemporal()
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected expression}}
#pragma omp for simd nontemporal(int)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} omp50-error@+1 {{expected variable name}}
#pragma omp for simd nontemporal(0)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp for simd nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{use of undeclared identifier 'x'}}
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp for simd nontemporal(x, y)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+3 {{use of undeclared identifier 'x'}}
  // expected-error@+2 {{use of undeclared identifier 'y'}}
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp for simd nontemporal(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
  int x, y;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd nontemporal(x :)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}}
#pragma omp for simd nontemporal(x :, )
  for (i = 0; i < 16; ++i)
    ;
  // omp50-note@+2 {{defined as nontemporal}}
  // omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}}
#pragma omp for simd nontemporal(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}}
#pragma omp for simd private(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}}
#pragma omp for simd nontemporal(x) private(x)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}}
#pragma omp for simd nontemporal(x, y : 0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}}
#pragma omp for simd nontemporal(x) lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}}
#pragma omp for simd lastprivate(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd order // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for simd'}} expected-error {{expected '(' after 'order'}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp for simd order( // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp for simd order(none // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp for simd order(concurrent // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp for simd order(concurrent) // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for simd'}}
  for (int i = 0; i < 10; ++i)
    ;
}
|
omptarget.h | //===---- omptarget.h - OpenMP GPU initialization ---------------- CUDA -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declarations of all library macros, types,
// and functions.
//
//===----------------------------------------------------------------------===//
#ifndef OMPTARGET_H
#define OMPTARGET_H
#include "common/allocator.h"
#include "common/debug.h" // debug
#include "common/state-queue.h"
#include "common/support.h"
#include "interface.h" // interfaces with omp, compiler, and user
#include "target_impl.h"
#define OMPTARGET_NVPTX_VERSION 1.1
// used by the library for the interface with the app
#define DISPATCH_FINISHED 0
#define DISPATCH_NOTFINISHED 1
// used by dynamic scheduling
#define FINISHED 0
#define NOT_FINISHED 1
#define LAST_CHUNK 2
#define BARRIER_COUNTER 0
#define ORDERED_COUNTER 1
// Worker slot type which is initialized with the default worker slot
// size of 4*32 bytes.
struct __kmpc_data_sharing_slot {
  __kmpc_data_sharing_slot *Next; // Following slot in the chain (0 if none).
  __kmpc_data_sharing_slot *Prev; // Preceding slot in the chain (0 if none).
  void *PrevSlotStackPtr; // Stack pointer saved for the previous slot —
                          // presumably used to unwind; confirm with callers.
  void *DataEnd;          // One past the last usable byte of Data.
  char Data[DS_Worker_Warp_Slot_Size]; // Payload storage for this slot.
};
////////////////////////////////////////////////////////////////////////////////
// task ICV and (implicit & explicit) task state
class omptarget_nvptx_TaskDescr {
public:
  // methods for flags
  INLINE omp_sched_t GetRuntimeSched() const;
  INLINE void SetRuntimeSched(omp_sched_t sched);
  // Nonzero iff this thread has encountered one or more parallel regions.
  INLINE int InParallelRegion() const { return items.flags & TaskDescr_InPar; }
  // Nonzero iff this thread is inside a level-2 or deeper parallel region.
  INLINE int InL2OrHigherParallelRegion() const {
    return items.flags & TaskDescr_InParL2P;
  }
  // Nonzero iff this descriptor holds ICVs for a parallel region
  // (as opposed to an explicit task).
  INLINE int IsParallelConstruct() const {
    return items.flags & TaskDescr_IsParConstr;
  }
  // A descriptor is either a parallel construct or an explicit task.
  INLINE int IsTaskConstruct() const { return !IsParallelConstruct(); }
  // methods for other fields
  INLINE uint16_t &ThreadId() { return items.threadId; }
  INLINE uint64_t &RuntimeChunkSize() { return items.runtimeChunkSize; }
  // Previous (parent) descriptor in the task-descriptor chain.
  INLINE omptarget_nvptx_TaskDescr *GetPrevTaskDescr() const { return prev; }
  INLINE void SetPrevTaskDescr(omptarget_nvptx_TaskDescr *taskDescr) {
    prev = taskDescr;
  }
  // init & copy
  INLINE void InitLevelZeroTaskDescr();
  INLINE void InitLevelOneTaskDescr(omptarget_nvptx_TaskDescr *parentTaskDescr);
  INLINE void Copy(omptarget_nvptx_TaskDescr *sourceTaskDescr);
  INLINE void CopyData(omptarget_nvptx_TaskDescr *sourceTaskDescr);
  INLINE void CopyParent(omptarget_nvptx_TaskDescr *parentTaskDescr);
  INLINE void CopyForExplicitTask(omptarget_nvptx_TaskDescr *parentTaskDescr);
  INLINE void CopyToWorkDescr(omptarget_nvptx_TaskDescr *masterTaskDescr);
  INLINE void CopyFromWorkDescr(omptarget_nvptx_TaskDescr *workTaskDescr);
  INLINE void CopyConvergentParent(omptarget_nvptx_TaskDescr *parentTaskDescr,
                                   uint16_t tid, uint16_t tnum);
  // Save/restore the worksharing-loop state held in loopData.
  INLINE void SaveLoopData();
  INLINE void RestoreLoopData() const;

private:
  // bits for flags: (6 used, 2 free)
  //   3 bits (SchedMask) for runtime schedule
  //   1 bit (InPar) if this thread has encountered one or more parallel region
  //   1 bit (IsParConstr) if ICV for a parallel region (false = explicit task)
  //   1 bit (InParL2+) if this thread has encountered L2 or higher parallel
  //   region
  static const uint8_t TaskDescr_SchedMask = (0x1 | 0x2 | 0x4);
  static const uint8_t TaskDescr_InPar = 0x10;
  static const uint8_t TaskDescr_IsParConstr = 0x20;
  static const uint8_t TaskDescr_InParL2P = 0x40;

  // Loop state captured by SaveLoopData()/RestoreLoopData().
  struct SavedLoopDescr_items {
    int64_t loopUpperBound;
    int64_t nextLowerBound;
    int64_t chunk;
    int64_t stride;
    kmp_sched_t schedule;
  } loopData;

  struct TaskDescr_items {
    uint8_t flags; // 6 bit used (see flag above)
    uint8_t unused;
    uint16_t threadId;         // thread id
    uint64_t runtimeChunkSize; // runtime chunk size
  } items;
  omptarget_nvptx_TaskDescr *prev;
};
// build on kmp
// build on kmp
// Pairs the omptarget descriptor with the kmp task descriptor.  The
// "must be first"/"must be last" layout constraints suggest callers
// convert between the two via pointer arithmetic — verify with users.
typedef struct omptarget_nvptx_ExplicitTaskDescr {
  omptarget_nvptx_TaskDescr
      taskDescr; // omptarget_nvptx task description (must be first)
  kmp_TaskDescr kmpTaskDescr; // kmp task description (must be last)
} omptarget_nvptx_ExplicitTaskDescr;
////////////////////////////////////////////////////////////////////////////////
// Descriptor of a parallel region (worksharing in general)
// Wraps the master thread's task ICVs for the active parallel region.
class omptarget_nvptx_WorkDescr {
public:
  // access to data
  INLINE omptarget_nvptx_TaskDescr *WorkTaskDescr() { return &masterTaskICV; }

private:
  omptarget_nvptx_TaskDescr masterTaskICV;
};
////////////////////////////////////////////////////////////////////////////////
// Per-team state: the team master's level-zero task ICVs, the work
// descriptor for the active parallel region, and one preallocated
// data-sharing slot per warp.
class omptarget_nvptx_TeamDescr {
public:
  // access to data
  INLINE omptarget_nvptx_TaskDescr *LevelZeroTaskDescr() {
    return &levelZeroTaskDescr;
  }
  INLINE omptarget_nvptx_WorkDescr &WorkDescr() {
    return workDescrForActiveParallel;
  }
  // init
  INLINE void InitTeamDescr();

  // Reset warp 'wid's preallocated slot to an empty, unlinked state
  // (no next/prev, DataEnd one past the payload) and return it.
  INLINE __kmpc_data_sharing_slot *GetPreallocatedSlotAddr(int wid) {
    worker_rootS[wid].DataEnd =
        &worker_rootS[wid].Data[0] + DS_Worker_Warp_Slot_Size;
    // We currently do not have a next slot.
    worker_rootS[wid].Next = 0;
    worker_rootS[wid].Prev = 0;
    worker_rootS[wid].PrevSlotStackPtr = 0;
    return (__kmpc_data_sharing_slot *)&worker_rootS[wid];
  }

private:
  omptarget_nvptx_TaskDescr
      levelZeroTaskDescr; // icv for team master initial thread
  omptarget_nvptx_WorkDescr
      workDescrForActiveParallel; // one, ONLY for the active par
  ALIGN(16)
  __kmpc_data_sharing_slot worker_rootS[DS_Max_Warp_Number];
};
////////////////////////////////////////////////////////////////////////////////
// thread private data (struct of arrays for better coalescing)
// tid refers here to the global thread id
// do not support multiple concurrent kernels at this time
class omptarget_nvptx_ThreadPrivateContext {
public:
  // task
  // Task descriptor for implicit thread 'tid' in the (single) level-one
  // parallel region.
  INLINE omptarget_nvptx_TaskDescr *Level1TaskDescr(int tid) {
    return &levelOneTaskDescr[tid];
  }
  // Set the top of thread 'tid's task-descriptor stack.
  INLINE void SetTopLevelTaskDescr(int tid,
                                   omptarget_nvptx_TaskDescr *taskICV) {
    topTaskDescr[tid] = taskICV;
  }
  INLINE omptarget_nvptx_TaskDescr *GetTopLevelTaskDescr(int tid) const;
  // parallel
  // Thread count requested for the next parallel region started by 'tid'.
  INLINE uint16_t &NumThreadsForNextParallel(int tid) {
    return nextRegion.tnum[tid];
  }
  // schedule (for dispatch) — per-thread loop-dispatch state accessors.
  INLINE kmp_sched_t &ScheduleType(int tid) { return schedule[tid]; }
  INLINE int64_t &Chunk(int tid) { return chunk[tid]; }
  INLINE int64_t &LoopUpperBound(int tid) { return loopUpperBound[tid]; }
  INLINE int64_t &NextLowerBound(int tid) { return nextLowerBound[tid]; }
  INLINE int64_t &Stride(int tid) { return stride[tid]; }
  INLINE omptarget_nvptx_TeamDescr &TeamContext() { return teamContext; }
  INLINE void InitThreadPrivateContext(int tid);
  INLINE uint64_t &Cnt() { return cnt; }

private:
  // team context for this team
  omptarget_nvptx_TeamDescr teamContext;
  // task ICV for implicit threads in the only parallel region
  omptarget_nvptx_TaskDescr levelOneTaskDescr[MAX_THREADS_PER_TEAM];
  // pointer where to find the current task ICV (top of the stack)
  omptarget_nvptx_TaskDescr *topTaskDescr[MAX_THREADS_PER_TEAM];
  union {
    // Only one of the two is live at the same time.
    // parallel
    uint16_t tnum[MAX_THREADS_PER_TEAM];
  } nextRegion;
  // schedule (for dispatch)
  kmp_sched_t schedule[MAX_THREADS_PER_TEAM]; // remember schedule type for #for
  int64_t chunk[MAX_THREADS_PER_TEAM];
  int64_t loopUpperBound[MAX_THREADS_PER_TEAM];
  // state for dispatch with dyn/guided OR static (never use both at a time)
  int64_t nextLowerBound[MAX_THREADS_PER_TEAM];
  int64_t stride[MAX_THREADS_PER_TEAM];
  uint64_t cnt;
};
/// Memory manager for statically allocated memory.
/// Memory manager for statically allocated memory.
class omptarget_nvptx_SimpleMemoryManager {
private:
  struct MemDataTy {
    volatile unsigned keys[OMP_STATE_COUNT];
  } MemData[MAX_SM] ALIGN(128);

  // Reduce 'key' to an index into keys[].  The mask is a modulo only if
  // OMP_STATE_COUNT is a power of two — assumed here; confirm where the
  // constant is defined.
  INLINE static uint32_t hash(unsigned key) {
    return key & (OMP_STATE_COUNT - 1);
  }

public:
  INLINE void Release();
  INLINE const void *Acquire(const void *buf, size_t size);
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// global data tables
////////////////////////////////////////////////////////////////////////////////
extern omptarget_nvptx_SimpleMemoryManager omptarget_nvptx_simpleMemoryManager;
extern uint32_t EXTERN_SHARED(usedMemIdx);
extern uint32_t EXTERN_SHARED(usedSlotIdx);
#if _OPENMP
extern uint8_t parallelLevel[MAX_THREADS_PER_TEAM / WARPSIZE];
#pragma omp allocate(parallelLevel) allocator(omp_pteam_mem_alloc)
#else
extern uint8_t EXTERN_SHARED(parallelLevel)[MAX_THREADS_PER_TEAM / WARPSIZE];
#endif
extern uint16_t EXTERN_SHARED(threadLimit);
extern uint16_t EXTERN_SHARED(threadsInTeam);
extern uint16_t EXTERN_SHARED(nThreads);
extern omptarget_nvptx_ThreadPrivateContext *
EXTERN_SHARED(omptarget_nvptx_threadPrivateContext);
extern uint32_t EXTERN_SHARED(execution_param);
extern void *EXTERN_SHARED(ReductionScratchpadPtr);
////////////////////////////////////////////////////////////////////////////////
// work function (outlined parallel/simd functions) and arguments.
// needed for L1 parallelism only.
////////////////////////////////////////////////////////////////////////////////
typedef void *omptarget_nvptx_WorkFn;
extern volatile omptarget_nvptx_WorkFn EXTERN_SHARED(omptarget_nvptx_workFn);
////////////////////////////////////////////////////////////////////////////////
// get private data structures
////////////////////////////////////////////////////////////////////////////////
INLINE omptarget_nvptx_TeamDescr &getMyTeamDescriptor();
INLINE omptarget_nvptx_WorkDescr &getMyWorkDescriptor();
INLINE omptarget_nvptx_TaskDescr *
getMyTopTaskDescriptor(bool isSPMDExecutionMode);
INLINE omptarget_nvptx_TaskDescr *getMyTopTaskDescriptor(int globalThreadId);
////////////////////////////////////////////////////////////////////////////////
// inlined implementation
////////////////////////////////////////////////////////////////////////////////
// Find-first-set (1-based index of least-significant set bit, 0 if none)
// and population-count helpers for 32- and 64-bit values.
INLINE uint32_t __kmpc_impl_ffs(uint32_t x) { return __builtin_ffs(x); }
INLINE uint32_t __kmpc_impl_popc(uint32_t x) { return __builtin_popcount(x); }
// Use the 'll' builtins: they take 'long long', which is 64 bits on every
// target, whereas __builtin_ffsl/__builtin_popcountl take 'long' and would
// silently truncate the argument on LLP64 hosts where 'long' is 32 bits.
INLINE uint32_t __kmpc_impl_ffs(uint64_t x) { return __builtin_ffsll(x); }
INLINE uint32_t __kmpc_impl_popc(uint64_t x) { return __builtin_popcountll(x); }
#include "common/omptargeti.h"
#endif
|
GB_unaryop__lnot_fp32_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp32_int64
// op(A') function: GB_tran__lnot_fp32_int64
// C type: float
// A type: int64_t
// cast: float cij = (float) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply cij = !(aij != 0), cast to float, to all anz entries of Ax,
// writing into Cx (the arrays must not alias — both are 'restrict').
// Iterations are split statically over 'nthreads' OpenMP threads.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB_unop__lnot_fp32_int64
(
    float *restrict Cx,         // output array, anz entries
    const int64_t *restrict Ax, // input array, anz entries
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // Cx [p] = (float) !(Ax [p] != 0), via the GB_* macros above
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast each entry to float, and apply
// the logical-not operator.  The loop body lives in GB_unaryop_transpose.c
// (phase 2), driven by the GB_* macros defined earlier in this file.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB_tran__lnot_fp32_int64
(
    GrB_Matrix C,                    // output matrix
    const GrB_Matrix A,              // input matrix, transposed into C
    int64_t *restrict *Rowcounts,    // workspace — presumably per-slice row
                                     // counts; see GB_unaryop_transpose.c
    GBI_single_iterator Iter,        // iterator over A
    const int64_t *restrict A_slice, // slice boundaries for the naslice tasks
    int naslice                      // number of slices
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
tree-parloops.c | /* Loop autoparallelization.
Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
Contributed by Sebastian Pop <pop@cri.ensmp.fr> and
Zdenek Dvorak <dvorakz@suse.cz>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tree-flow.h"
#include "cfgloop.h"
#include "ggc.h"
#include "tree-data-ref.h"
#include "diagnostic.h"
#include "tree-pass.h"
#include "tree-scalar-evolution.h"
#include "hashtab.h"
#include "langhooks.h"
#include "tree-vectorizer.h"
/* This pass tries to distribute iterations of loops into several threads.
The implementation is straightforward -- for each loop we test whether its
iterations are independent, and if it is the case (and some additional
conditions regarding profitability and correctness are satisfied), we
add GIMPLE_OMP_PARALLEL and GIMPLE_OMP_FOR codes and let omp expansion
machinery do its job.
   Most of the complexity is in bringing the code into shape expected
by the omp expanders:
-- for GIMPLE_OMP_FOR, ensuring that the loop has only one induction
variable and that the exit test is at the start of the loop body
-- for GIMPLE_OMP_PARALLEL, replacing the references to local addressable
variables by accesses through pointers, and breaking up ssa chains
by storing the values incoming to the parallelized loop to a structure
passed to the new function as an argument (something similar is done
in omp gimplification, unfortunately only a small part of the code
can be shared).
TODO:
-- if there are several parallelizable loops in a function, it may be
possible to generate the threads just once (using synchronization to
ensure that cross-loop dependences are obeyed).
-- handling of common scalar dependence patterns (accumulation, ...)
-- handling of non-innermost loops */
/*
Reduction handling:
currently we use vect_is_simple_reduction() to detect reduction patterns.
The code transformation will be introduced by an example.
parloop
{
int sum=1;
for (i = 0; i < N; i++)
{
x[i] = i + 3;
sum+=x[i];
}
}
gimple-like code:
header_bb:
# sum_29 = PHI <sum_11(5), 1(3)>
# i_28 = PHI <i_12(5), 0(3)>
D.1795_8 = i_28 + 3;
x[i_28] = D.1795_8;
sum_11 = D.1795_8 + sum_29;
i_12 = i_28 + 1;
if (N_6(D) > i_12)
goto header_bb;
exit_bb:
# sum_21 = PHI <sum_11(4)>
printf (&"%d"[0], sum_21);
after reduction transformation (only relevant parts):
parloop
{
....
# Storing the initial value given by the user. #
.paral_data_store.32.sum.27 = 1;
#pragma omp parallel num_threads(4)
#pragma omp for schedule(static)
# The neutral element corresponding to the particular
reduction's operation, e.g. 0 for PLUS_EXPR,
1 for MULT_EXPR, etc. replaces the user's initial value. #
# sum.27_29 = PHI <sum.27_11, 0>
sum.27_11 = D.1827_8 + sum.27_29;
GIMPLE_OMP_CONTINUE
# Adding this reduction phi is done at create_phi_for_local_result() #
# sum.27_56 = PHI <sum.27_11, 0>
GIMPLE_OMP_RETURN
# Creating the atomic operation is done at
create_call_for_reduction_1() #
#pragma omp atomic_load
D.1839_59 = *&.paral_data_load.33_51->reduction.23;
D.1840_60 = sum.27_56 + D.1839_59;
#pragma omp atomic_store (D.1840_60);
GIMPLE_OMP_RETURN
# collecting the result after the join of the threads is done at
create_loads_for_reductions().
The value computed by the threads is loaded from the
shared struct. #
.paral_data_load.33_52 = &.paral_data_store.32;
sum_37 = .paral_data_load.33_52->sum.27;
sum_43 = D.1795_41 + sum_37;
exit bb:
# sum_21 = PHI <sum_43, sum_26>
printf (&"%d"[0], sum_21);
...
}
*/
/* Minimal number of iterations of a loop that should be executed in each
thread. */
#define MIN_PER_THREAD 100
/* Element of the hashtable, representing a
reduction in the current loop. */
struct reduction_info
{
  gimple reduc_stmt;		/* reduction statement.  */
  gimple reduc_phi;		/* The phi node defining the reduction.  */
  enum tree_code reduction_code;/* code for the reduction operation.  */
  gimple keep_res;		/* The PHI_RESULT of this phi is the resulting value
				   of the reduction variable when exiting the loop.  */
  tree initial_value;		/* The initial value of the reduction var before entering the loop.  */
  tree field;			/* the name of the field in the parloop data structure intended for reduction.  */
  tree init;			/* reduction initialization value.  */
  gimple new_phi;		/* (helper field) Newly created phi node whose result
				   will be passed to the atomic operation.  Represents
				   the local result each thread computed for the reduction
				   operation.  */
};
/* Equality and hash functions for hashtab code. */
/* hashtab equality callback: two reduction_info entries describe the same
   reduction iff they hang off the same reduction PHI node.  */
static int
reduction_info_eq (const void *aa, const void *bb)
{
  const struct reduction_info *left = (const struct reduction_info *) aa;
  const struct reduction_info *right = (const struct reduction_info *) bb;

  return left->reduc_phi == right->reduc_phi;
}
/* hashtab hash callback: hash a reduction_info entry by the pointer
   identity of its reduction PHI node, matching reduction_info_eq.  */
static hashval_t
reduction_info_hash (const void *aa)
{
  return htab_hash_pointer (((const struct reduction_info *) aa)->reduc_phi);
}
/* Look up the reduction_info entry keyed by PHI in REDUCTION_LIST,
   or return NULL if the table is empty or PHI is not recorded.  */
static struct reduction_info *
reduction_phi (htab_t reduction_list, gimple phi)
{
  struct reduction_info key;

  if (htab_elements (reduction_list) == 0)
    return NULL;

  key.reduc_phi = phi;
  return (struct reduction_info *) htab_find (reduction_list, &key);
}
/* Element of hashtable of names to copy. */
/* Maps one SSA name version to the copy used in the parallelized body,
   plus the structure field used to transfer its value to the new
   function.  */
struct name_to_copy_elt
{
  unsigned version;	/* The version of the name to copy.  */
  tree new_name;	/* The new name used in the copy.  */
  tree field;		/* The field of the structure used to pass the
			   value.  */
};
/* Equality and hash functions for hashtab code. */
/* hashtab equality callback: entries match iff they record the same SSA
   name version.  */
static int
name_to_copy_elt_eq (const void *aa, const void *bb)
{
  const struct name_to_copy_elt *elt_a = (const struct name_to_copy_elt *) aa;
  const struct name_to_copy_elt *elt_b = (const struct name_to_copy_elt *) bb;

  return elt_a->version == elt_b->version;
}
/* hashtab hash callback: the SSA name version itself is the hash,
   matching name_to_copy_elt_eq.  */
static hashval_t
name_to_copy_elt_hash (const void *aa)
{
  return (hashval_t) ((const struct name_to_copy_elt *) aa)->version;
}
/* Returns true if the iterations of LOOP are independent on each other (that
   is, if we can execute them in parallel), and if LOOP satisfies other
   conditions that we need to be able to parallelize it.  Description of number
   of iterations is stored to NITER.  Reduction analysis is done, if
   reductions are found, they are inserted to the REDUCTION_LIST.  */
static bool
loop_parallel_p (struct loop *loop, htab_t reduction_list,
		 struct tree_niter_desc *niter)
{
  edge exit = single_dom_exit (loop);
  VEC (ddr_p, heap) * dependence_relations;
  VEC (data_reference_p, heap) *datarefs;
  lambda_trans_matrix trans;
  bool ret = false;
  gimple_stmt_iterator gsi;
  loop_vec_info simple_loop_info;
  /* Only consider innermost loops with just one exit.  The innermost-loop
     restriction is not necessary, but it makes things simpler.  */
  if (loop->inner || !exit)
    return false;
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\nConsidering loop %d\n", loop->num);
  /* We need to know # of iterations, and there should be no uses of values
     defined inside loop outside of it, unless the values are invariants of
     the loop.  */
  if (!number_of_iterations_exit (loop, exit, niter, false))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "  FAILED: number of iterations not known\n");
      return false;
    }
  /* Reuse the vectorizer's reduction detection: analyze the loop form and
     ask vect_is_simple_reduction about each header phi.  Recognized
     reductions are recorded in REDUCTION_LIST.  */
  vect_dump = NULL;
  simple_loop_info = vect_analyze_loop_form (loop);
  for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      gimple reduc_stmt = NULL;
      /* ??? TODO: Change this into a generic function that
         recognizes reductions.  */
      if (!is_gimple_reg (PHI_RESULT (phi)))
	continue;
      if (simple_loop_info)
	reduc_stmt = vect_is_simple_reduction (simple_loop_info, phi);
      /* Create a reduction_info struct, initialize it and insert it to
         the reduction list.  */
      if (reduc_stmt)
	{
	  PTR *slot;
	  struct reduction_info *new_reduction;
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file,
		       "Detected reduction. reduction stmt is: \n");
	      print_gimple_stmt (dump_file, reduc_stmt, 0, 0);
	      fprintf (dump_file, "\n");
	    }
	  new_reduction = XCNEW (struct reduction_info);
	  new_reduction->reduc_stmt = reduc_stmt;
	  new_reduction->reduc_phi = phi;
	  new_reduction->reduction_code = gimple_assign_rhs_code (reduc_stmt);
	  slot = htab_find_slot (reduction_list, new_reduction, INSERT);
	  *slot = new_reduction;
	}
    }
  /* Get rid of the information created by the vectorizer functions.  */
  destroy_loop_vec_info (simple_loop_info, true);
  /* Every value live after the loop must be either a loop invariant or
     the result of a recognized reduction; otherwise the loop cannot be
     parallelized.  Check the phis in the exit block.  */
  for (gsi = gsi_start_phis (exit->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      struct reduction_info *red;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      gimple reduc_phi;
      tree val = PHI_ARG_DEF_FROM_EDGE (phi, exit);
      if (is_gimple_reg (val))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "phi is ");
	      print_gimple_stmt (dump_file, phi, 0, 0);
	      fprintf (dump_file, "arg of phi to exit:   value ");
	      print_generic_expr (dump_file, val, 0);
	      fprintf (dump_file, " used outside loop\n");
	      fprintf (dump_file,
		       "  checking if it a part of reduction pattern:  \n");
	    }
	  if (htab_elements (reduction_list) == 0)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file,
			 "  FAILED: it is not a part of reduction.\n");
	      return false;
	    }
	  /* Find the use of VAL inside the loop; that should be the
	     reduction phi we recorded earlier.  */
	  reduc_phi = NULL;
	  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, val)
	    {
	      if (flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
		{
		  reduc_phi = USE_STMT (use_p);
		  break;
		}
	    }
	  red = reduction_phi (reduction_list, reduc_phi);
	  if (red == NULL)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file,
			 "  FAILED: it is not a part of reduction.\n");
	      return false;
	    }
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "reduction phi is  ");
	      print_gimple_stmt (dump_file, red->reduc_phi, 0, 0);
	      fprintf (dump_file, "reduction stmt is  ");
	      print_gimple_stmt (dump_file, red->reduc_stmt, 0, 0);
	    }
	}
    }
  /* The iterations of the loop may communicate only through bivs whose
     iteration space can be distributed efficiently.  */
  for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree def = PHI_RESULT (phi);
      affine_iv iv;
      if (is_gimple_reg (def) && !simple_iv (loop, loop, def, &iv, true))
	{
	  struct reduction_info *red;
	  red = reduction_phi (reduction_list, phi);
	  if (red == NULL)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file,
			 "  FAILED: scalar dependency between iterations\n");
	      return false;
	    }
	}
    }
  /* We need to version the loop to verify assumptions in runtime.  */
  if (!can_duplicate_loop_p (loop))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "  FAILED: cannot be duplicated\n");
      return false;
    }
  /* Check for problems with dependences.  If the loop can be reversed,
     the iterations are independent.  The test used is a legality check
     for the transformation matrix [-1] (loop reversal): if reversal is
     legal, no dependence crosses iterations.  */
  datarefs = VEC_alloc (data_reference_p, heap, 10);
  dependence_relations = VEC_alloc (ddr_p, heap, 10 * 10);
  compute_data_dependences_for_loop (loop, true, &datarefs,
				     &dependence_relations);
  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_data_dependence_relations (dump_file, dependence_relations);
  trans = lambda_trans_matrix_new (1, 1);
  LTM_MATRIX (trans)[0][0] = -1;
  if (lambda_transform_legal_p (trans, 1, dependence_relations))
    {
      ret = true;
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "  SUCCESS: may be parallelized\n");
    }
  else if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
	     "  FAILED: data dependencies exist across iterations\n");
  free_dependence_relations (dependence_relations);
  free_data_refs (datarefs);
  return ret;
}
/* Return true when LOOP contains basic blocks marked with the
   BB_IRREDUCIBLE_LOOP flag.  */
static inline bool
loop_has_blocks_with_irreducible_flag (struct loop *loop)
{
  basic_block *body = get_loop_body_in_dom_order (loop);
  bool found = false;
  unsigned ix;

  for (ix = 0; ix < loop->num_nodes; ix++)
    if (body[ix]->flags & BB_IRREDUCIBLE_LOOP)
      {
	found = true;
	break;
      }

  free (body);
  return found;
}
/* Assigns the address of OBJ in TYPE to an ssa name, and returns this name.
   The assignment statement is placed on edge ENTRY.  DECL_ADDRESS maps decls
   to their addresses that can be reused.  The address of OBJ is known to
   be invariant in the whole function.  */
static tree
take_address_of (tree obj, tree type, edge entry, htab_t decl_address)
{
  int uid;
  void **dslot;
  struct int_tree_map ielt, *nielt;
  tree *var_p, name, bvar, addr;
  gimple stmt;
  gimple_seq stmts;
  /* Since the address of OBJ is invariant, the trees may be shared.
     Avoid rewriting unrelated parts of the code.  */
  obj = unshare_expr (obj);
  /* Strip off handled components (array refs, field refs, ...) to reach
     the base declaration; VAR_P ends up pointing at its slot in OBJ.  */
  for (var_p = &obj;
       handled_component_p (*var_p);
       var_p = &TREE_OPERAND (*var_p, 0))
    continue;
  /* Look up (or create and cache) the ssa name holding the address of
     the base declaration, keyed by the decl's uid.  */
  uid = DECL_UID (*var_p);
  ielt.uid = uid;
  dslot = htab_find_slot_with_hash (decl_address, &ielt, uid, INSERT);
  if (!*dslot)
    {
      addr = build_addr (*var_p, current_function_decl);
      bvar = create_tmp_var (TREE_TYPE (addr), get_name (*var_p));
      add_referenced_var (bvar);
      stmt = gimple_build_assign (bvar, addr);
      name = make_ssa_name (bvar, stmt);
      gimple_assign_set_lhs (stmt, name);
      gsi_insert_on_edge_immediate (entry, stmt);
      nielt = XNEW (struct int_tree_map);
      nielt->uid = uid;
      nielt->to = name;
      *dslot = nielt;
    }
  else
    name = ((struct int_tree_map *) *dslot)->to;
  /* If OBJ was a component reference, rebuild the full address on top of
     the cached base address and gimplify it on the entry edge.  */
  if (var_p != &obj)
    {
      *var_p = build1 (INDIRECT_REF, TREE_TYPE (*var_p), name);
      name = force_gimple_operand (build_addr (obj, current_function_decl),
				   &stmts, true, NULL_TREE);
      if (!gimple_seq_empty_p (stmts))
	gsi_insert_seq_on_edge_immediate (entry, stmts);
    }
  /* Convert the address to the requested TYPE if necessary.  */
  if (TREE_TYPE (name) != type)
    {
      name = force_gimple_operand (fold_convert (type, name), &stmts, true,
				   NULL_TREE);
      if (!gimple_seq_empty_p (stmts))
	gsi_insert_seq_on_edge_immediate (entry, stmts);
    }
  return name;
}
/* Callback for htab_traverse.  Create the initialization statement
   for reduction described in SLOT, and place it at the preheader of
   the loop described in DATA.  */
static int
initialize_reductions (void **slot, void *data)
{
  tree init, c;
  tree bvar, type, arg;
  edge e;
  struct reduction_info *const reduc = (struct reduction_info *) *slot;
  struct loop *loop = (struct loop *) data;
  /* Create initialization in preheader:
     reduction_variable = initialization value of reduction.  */
  /* In the phi node at the header, replace the argument coming
     from the preheader with the reduction initialization value.  */
  /* Create a new variable to initialize the reduction.  */
  type = TREE_TYPE (PHI_RESULT (reduc->reduc_phi));
  bvar = create_tmp_var (type, "reduction");
  add_referenced_var (bvar);
  /* Build a dummy OMP_CLAUSE_REDUCTION so omp_reduction_init can
     compute the neutral element for this reduction's operation.  */
  c = build_omp_clause (OMP_CLAUSE_REDUCTION);
  OMP_CLAUSE_REDUCTION_CODE (c) = reduc->reduction_code;
  OMP_CLAUSE_DECL (c) = SSA_NAME_VAR (gimple_assign_lhs (reduc->reduc_stmt));
  init = omp_reduction_init (c, TREE_TYPE (bvar));
  reduc->init = init;
  /* Replace the argument representing the initialization value
     with the initialization value for the reduction (neutral
     element for the particular operation, e.g. 0 for PLUS_EXPR,
     1 for MULT_EXPR, etc).
     Keep the old value in a new variable "reduction_initial",
     that will be taken in consideration after the parallel
     computing is done.  */
  e = loop_preheader_edge (loop);
  arg = PHI_ARG_DEF_FROM_EDGE (reduc->reduc_phi, e);
  /* Create new variable to hold the initial value.  */
  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE
	   (reduc->reduc_phi, loop_preheader_edge (loop)), init);
  reduc->initial_value = arg;
  return 1;
}
/* Data passed to eliminate_local_variables_1 through walk_gimple_op.
   INFO must be the first member: the callback receives a pointer to it
   and casts back to struct elv_data.  */
struct elv_data
{
  struct walk_stmt_info info;	/* Generic walker state (must be first).  */
  edge entry;			/* Edge on which address computations are
				   inserted.  */
  htab_t decl_address;		/* Cache of addresses already taken, keyed
				   by decl uid.  */
  bool changed;			/* Set when the walked expression was
				   rewritten.  */
};
/* Eliminates references to local variables in *TP out of the single
   entry single exit region starting at DTA->ENTRY.
   DECL_ADDRESS contains addresses of the references that had their
   address taken already.  If the expression is changed, CHANGED is
   set to true.  Callback for walk_tree.  */
static tree
eliminate_local_variables_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct elv_data *const dta = (struct elv_data *) data;
  tree t = *tp, var, addr, addr_type, type, obj;
  /* Case 1: a direct reference to a declaration.  Replace it with an
     indirection through its (region-invariant) address.  */
  if (DECL_P (t))
    {
      *walk_subtrees = 0;
      /* Leave non-variables and external decls alone.  */
      if (!SSA_VAR_P (t) || DECL_EXTERNAL (t))
	return NULL_TREE;
      type = TREE_TYPE (t);
      addr_type = build_pointer_type (type);
      addr = take_address_of (t, addr_type, dta->entry, dta->decl_address);
      *tp = build1 (INDIRECT_REF, TREE_TYPE (*tp), addr);
      dta->changed = true;
      return NULL_TREE;
    }
  /* Case 2: an address computation.  */
  if (TREE_CODE (t) == ADDR_EXPR)
    {
      /* ADDR_EXPR may appear in two contexts:
         -- as a gimple operand, when the address taken is a function invariant
         -- as gimple rhs, when the resulting address in not a function
            invariant
         We do not need to do anything special in the latter case (the base of
         the memory reference whose address is taken may be replaced in the
         DECL_P case).  The former case is more complicated, as we need to
         ensure that the new address is still a gimple operand.  Thus, it
         is not sufficient to replace just the base of the memory reference --
         we need to move the whole computation of the address out of the
         loop.  */
      if (!is_gimple_val (t))
	return NULL_TREE;
      *walk_subtrees = 0;
      obj = TREE_OPERAND (t, 0);
      var = get_base_address (obj);
      if (!var || !SSA_VAR_P (var) || DECL_EXTERNAL (var))
	return NULL_TREE;
      addr_type = TREE_TYPE (t);
      addr = take_address_of (obj, addr_type, dta->entry, dta->decl_address);
      *tp = addr;
      dta->changed = true;
      return NULL_TREE;
    }
  /* Do not descend into non-expression nodes.  */
  if (!EXPR_P (t))
    *walk_subtrees = 0;
  return NULL_TREE;
}
/* Moves the references to local variables in STMT out of the single
   entry single exit region starting at ENTRY.  DECL_ADDRESS contains
   addresses of the references that had their address taken
   already.  */
static void
eliminate_local_variables_stmt (edge entry, gimple stmt,
				htab_t decl_address)
{
  struct elv_data walk_data;

  /* Prepare the walker state; INFO must be zeroed before use.  */
  memset (&walk_data.info, 0, sizeof (walk_data.info));
  walk_data.entry = entry;
  walk_data.decl_address = decl_address;
  walk_data.changed = false;

  walk_gimple_op (stmt, eliminate_local_variables_1, &walk_data.info);

  /* Recompute operand caches only when something was rewritten.  */
  if (walk_data.changed)
    update_stmt (stmt);
}
/* Eliminates the references to local variables from the single entry
   single exit region between the ENTRY and EXIT edges.
   This includes:
   1) Taking address of a local variable -- these are moved out of the
      region (and temporary variable is created to hold the address if
      necessary).
   2) Dereferencing a local variable -- these are replaced with indirect
      references.  */
static void
eliminate_local_variables (edge entry, edge exit)
{
  basic_block bb;
  VEC (basic_block, heap) *body = VEC_alloc (basic_block, heap, 3);
  unsigned i;
  gimple_stmt_iterator gsi;
  /* Addresses already taken are cached here so each decl's address is
     computed at most once on the entry edge.  */
  htab_t decl_address = htab_create (10, int_tree_map_hash, int_tree_map_eq,
				     free);
  basic_block entry_bb = entry->src;
  basic_block exit_bb = exit->dest;
  gather_blocks_in_sese_region (entry_bb, exit_bb, &body);
  /* Process every statement strictly inside the region; the boundary
     blocks themselves are excluded.  */
  for (i = 0; VEC_iterate (basic_block, body, i, bb); i++)
    if (bb != entry_bb && bb != exit_bb)
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	eliminate_local_variables_stmt (entry, gsi_stmt (gsi),
					decl_address);
  htab_delete (decl_address);
  VEC_free (basic_block, heap, body);
}
/* Returns true if expression EXPR is not defined between ENTRY and
   EXIT, i.e. if all its operands are defined outside of the region.  */
static bool
expr_invariant_in_region_p (edge entry, edge exit, tree expr)
{
  basic_block region_entry = entry->src;
  basic_block region_exit = exit->dest;
  basic_block def_bb;

  /* Constants are trivially invariant.  */
  if (is_gimple_min_invariant (expr))
    return true;

  /* Anything that is neither a constant nor an SSA name is treated as
     not invariant.  */
  if (TREE_CODE (expr) != SSA_NAME)
    return false;

  /* An SSA name is invariant in the region unless its definition lies
     strictly inside it: dominated by the entry block but not by the
     exit block.  */
  def_bb = gimple_bb (SSA_NAME_DEF_STMT (expr));
  if (def_bb
      && dominated_by_p (CDI_DOMINATORS, def_bb, region_entry)
      && !dominated_by_p (CDI_DOMINATORS, def_bb, region_exit))
    return false;

  return true;
}
/* If COPY_NAME_P is true, creates and returns a duplicate of NAME.
   The copies are stored to NAME_COPIES, if NAME was already duplicated,
   its duplicate stored in NAME_COPIES is returned.
   Regardless of COPY_NAME_P, the decl used as a base of the ssa name is also
   duplicated, storing the copies in DECL_COPIES.  */
static tree
separate_decls_in_region_name (tree name,
			       htab_t name_copies, htab_t decl_copies,
			       bool copy_name_p)
{
  tree copy, var, var_copy;
  unsigned idx, uid, nuid;
  struct int_tree_map ielt, *nielt;
  struct name_to_copy_elt elt, *nelt;
  void **slot, **dslot;
  if (TREE_CODE (name) != SSA_NAME)
    return name;
  idx = SSA_NAME_VERSION (name);
  elt.version = idx;
  /* With NO_INSERT, SLOT is NULL when NAME has no entry; with INSERT an
     empty slot is created for the new copy below.  */
  slot = htab_find_slot_with_hash (name_copies, &elt, idx,
				   copy_name_p ? INSERT : NO_INSERT);
  if (slot && *slot)
    return ((struct name_to_copy_elt *) *slot)->new_name;
  /* Duplicate (or reuse the duplicate of) the base variable.  */
  var = SSA_NAME_VAR (name);
  uid = DECL_UID (var);
  ielt.uid = uid;
  dslot = htab_find_slot_with_hash (decl_copies, &ielt, uid, INSERT);
  if (!*dslot)
    {
      var_copy = create_tmp_var (TREE_TYPE (var), get_name (var));
      DECL_GIMPLE_REG_P (var_copy) = DECL_GIMPLE_REG_P (var);
      add_referenced_var (var_copy);
      nielt = XNEW (struct int_tree_map);
      nielt->uid = uid;
      nielt->to = var_copy;
      *dslot = nielt;
      /* Ensure that when we meet this decl next time, we won't duplicate
         it again.  */
      nuid = DECL_UID (var_copy);
      ielt.uid = nuid;
      dslot = htab_find_slot_with_hash (decl_copies, &ielt, nuid, INSERT);
      gcc_assert (!*dslot);
      nielt = XNEW (struct int_tree_map);
      nielt->uid = nuid;
      nielt->to = var_copy;
      *dslot = nielt;
    }
  else
    var_copy = ((struct int_tree_map *) *dslot)->to;
  /* Create and record a fresh ssa name when requested; otherwise keep
     NAME but rebase it onto the duplicated variable.  */
  if (copy_name_p)
    {
      copy = duplicate_ssa_name (name, NULL);
      nelt = XNEW (struct name_to_copy_elt);
      nelt->version = idx;
      nelt->new_name = copy;
      nelt->field = NULL_TREE;
      *slot = nelt;
    }
  else
    {
      gcc_assert (!slot);
      copy = name;
    }
  SSA_NAME_VAR (copy) = var_copy;
  return copy;
}
/* Finds the ssa names used in STMT that are defined outside the
   region between ENTRY and EXIT and replaces such ssa names with
   their duplicates.  The duplicates are stored to NAME_COPIES.  Base
   decls of all ssa names used in STMT (including those defined in
   LOOP) are replaced with the new temporary variables; the
   replacement decls are stored in DECL_COPIES.  */
static void
separate_decls_in_region_stmt (edge entry, edge exit, gimple stmt,
			       htab_t name_copies, htab_t decl_copies)
{
  use_operand_p use;
  def_operand_p def;
  ssa_op_iter oi;
  tree name, copy;
  bool copy_name_p;
  mark_virtual_ops_for_renaming (stmt);
  /* Defs are never copied (COPY_NAME_P false) -- only their base decl is
     replaced; the ssa name itself stays, as asserted below.  */
  FOR_EACH_PHI_OR_STMT_DEF (def, stmt, oi, SSA_OP_DEF)
    {
      name = DEF_FROM_PTR (def);
      gcc_assert (TREE_CODE (name) == SSA_NAME);
      copy = separate_decls_in_region_name (name, name_copies, decl_copies,
					    false);
      gcc_assert (copy == name);
    }
  /* Uses of names defined outside the region are redirected to fresh
     copies; uses of names defined inside only get their decl rebased.  */
  FOR_EACH_PHI_OR_STMT_USE (use, stmt, oi, SSA_OP_USE)
    {
      name = USE_FROM_PTR (use);
      if (TREE_CODE (name) != SSA_NAME)
	continue;
      copy_name_p = expr_invariant_in_region_p (entry, exit, name);
      copy = separate_decls_in_region_name (name, name_copies, decl_copies,
					    copy_name_p);
      SET_USE (use, copy);
    }
}
/* Callback for htab_traverse. Adds a field corresponding to the reduction
specified in SLOT. The type is passed in DATA. */
static int
add_field_for_reduction (void **slot, void *data)
{
struct reduction_info *const red = (struct reduction_info *) *slot;
tree const type = (tree) data;
tree var = SSA_NAME_VAR (gimple_assign_lhs (red->reduc_stmt));
tree field = build_decl (FIELD_DECL, DECL_NAME (var), TREE_TYPE (var));
insert_field_into_struct (type, field);
red->field = field;
return 1;
}
/* Callback for htab_traverse. Adds a field corresponding to a ssa name
described in SLOT. The type is passed in DATA. */
static int
add_field_for_name (void **slot, void *data)
{
struct name_to_copy_elt *const elt = (struct name_to_copy_elt *) *slot;
tree type = (tree) data;
tree name = ssa_name (elt->version);
tree var = SSA_NAME_VAR (name);
tree field = build_decl (FIELD_DECL, DECL_NAME (var), TREE_TYPE (var));
insert_field_into_struct (type, field);
elt->field = field;
return 1;
}
/* Callback for htab_traverse.  A local result is the intermediate result
   computed by a single
   thread, or the initial value in case no iteration was executed.
   This function creates a phi node reflecting these values.
   The phi's result will be stored in NEW_PHI field of the
   reduction's data structure.  */
static int
create_phi_for_local_result (void **slot, void *data)
{
  struct reduction_info *const reduc = (struct reduction_info *) *slot;
  const struct loop *const loop = (const struct loop *) data;
  edge e;
  gimple new_phi;
  basic_block store_bb;
  tree local_res;
  /* STORE_BB is the block where the phi
     should be stored.  It is the destination of the loop exit.
     (Find the fallthru edge from GIMPLE_OMP_CONTINUE).  */
  store_bb = FALLTHRU_EDGE (loop->latch)->dest;
  /* STORE_BB has two predecessors.  One coming from  the loop
     (the reduction's result is computed at the loop),
     and another coming from a block preceding the loop,
     when no iterations
     are executed (the initial value should be taken).  */
  if (EDGE_PRED (store_bb, 0) == FALLTHRU_EDGE (loop->latch))
    e = EDGE_PRED (store_bb, 1);
  else
    e = EDGE_PRED (store_bb, 0);
  /* The phi merges the neutral initial value (no iterations executed)
     with the thread's accumulated result from the loop.  */
  local_res
    = make_ssa_name (SSA_NAME_VAR (gimple_assign_lhs (reduc->reduc_stmt)),
		     NULL);
  new_phi = create_phi_node (local_res, store_bb);
  SSA_NAME_DEF_STMT (local_res) = new_phi;
  add_phi_arg (new_phi, reduc->init, e);
  add_phi_arg (new_phi, gimple_assign_lhs (reduc->reduc_stmt),
	       FALLTHRU_EDGE (loop->latch));
  reduc->new_phi = new_phi;
  return 1;
}
/* Bookkeeping for the load/store generation callbacks: the variable
   holding the shared data, the pointer used to access it, and the
   basic blocks in which stores to and loads from it are emitted.  */
struct clsn_data
{
  tree store;			/* The shared-data structure variable.  */
  tree load;			/* Pointer through which it is accessed.  */
  basic_block store_bb;		/* Block where stores are inserted.  */
  basic_block load_bb;		/* Block where loads are inserted.  */
};
/* Callback for htab_traverse.  Create an atomic instruction for the
   reduction described in SLOT.
   DATA annotates the place in memory the atomic operation relates to,
   and the basic block it needs to be generated in.  */
static int
create_call_for_reduction_1 (void **slot, void *data)
{
  struct reduction_info *const reduc = (struct reduction_info *) *slot;
  struct clsn_data *const clsn_data = (struct clsn_data *) data;
  gimple_stmt_iterator gsi;
  tree type = TREE_TYPE (PHI_RESULT (reduc->reduc_phi));
  tree struct_type = TREE_TYPE (TREE_TYPE (clsn_data->load));
  tree load_struct;
  basic_block bb;
  basic_block new_bb;
  edge e;
  tree t, addr, ref, x;
  tree tmp_load, name;
  gimple load;
  /* Address of the shared-structure field this reduction accumulates
     into; it is the memory operand of the atomic update below.  */
  load_struct = fold_build1 (INDIRECT_REF, struct_type, clsn_data->load);
  t = build3 (COMPONENT_REF, type, load_struct, reduc->field, NULL_TREE);
  addr = build_addr (t, current_function_decl);
  /* Create phi node.  */
  bb = clsn_data->load_bb;
  e = split_block (bb, t);
  new_bb = e->dest;
  /* Atomically load the current contents of the field...  */
  tmp_load = create_tmp_var (TREE_TYPE (TREE_TYPE (addr)), NULL);
  add_referenced_var (tmp_load);
  tmp_load = make_ssa_name (tmp_load, NULL);
  load = gimple_build_omp_atomic_load (tmp_load, addr);
  SSA_NAME_DEF_STMT (tmp_load) = load;
  gsi = gsi_start_bb (new_bb);
  gsi_insert_after (&gsi, load, GSI_NEW_STMT);
  e = split_block (new_bb, load);
  new_bb = e->dest;
  gsi = gsi_start_bb (new_bb);
  ref = tmp_load;
  /* ... combine it with this thread's local result using the reduction
     operation, and atomically store the combined value back.  */
  x = fold_build2 (reduc->reduction_code,
		   TREE_TYPE (PHI_RESULT (reduc->new_phi)), ref,
		   PHI_RESULT (reduc->new_phi));
  name = force_gimple_operand_gsi (&gsi, x, true, NULL_TREE, true,
				   GSI_CONTINUE_LINKING);
  gsi_insert_after (&gsi, gimple_build_omp_atomic_store (name), GSI_NEW_STMT);
  return 1;
}
/* Create the atomic operation at the join point of the threads.
   REDUCTION_LIST describes the reductions in the LOOP.
   LD_ST_DATA describes the shared data structure where
   shared data is stored in and loaded from.  */
static void
create_call_for_reduction (struct loop *loop, htab_t reduction_list,
			   struct clsn_data *ld_st_data)
{
  /* First create the per-thread local-result phis, then emit the atomic
     read-modify-write for each reduction at the join block.  */
  htab_traverse (reduction_list, create_phi_for_local_result, loop);
  /* Find the fallthru edge from GIMPLE_OMP_CONTINUE.  */
  ld_st_data->load_bb = FALLTHRU_EDGE (loop->latch)->dest;
  htab_traverse (reduction_list, create_call_for_reduction_1, ld_st_data);
}
/* Callback for htab_traverse.  Loads the final reduction value at the
   join point of all threads, and inserts it in the right place.  */
static int
create_loads_for_reductions (void **slot, void *data)
{
  struct reduction_info *const red = (struct reduction_info *) *slot;
  struct clsn_data *const clsn_data = (struct clsn_data *) data;
  gimple stmt;
  gimple_stmt_iterator gsi;
  tree type = TREE_TYPE (gimple_assign_lhs (red->reduc_stmt));
  tree struct_type = TREE_TYPE (TREE_TYPE (clsn_data->load));
  tree load_struct;
  tree name;
  tree x;
  /* Read the accumulated value back out of the shared structure...  */
  gsi = gsi_after_labels (clsn_data->load_bb);
  load_struct = fold_build1 (INDIRECT_REF, struct_type, clsn_data->load);
  load_struct = build3 (COMPONENT_REF, type, load_struct, red->field,
			NULL_TREE);
  x = load_struct;
  /* ... and assign it to the ssa name that the exit phi used to define,
     replacing that phi with a plain assignment.  */
  name = PHI_RESULT (red->keep_res);
  stmt = gimple_build_assign (name, x);
  SSA_NAME_DEF_STMT (name) = stmt;
  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
  for (gsi = gsi_start_phis (gimple_bb (red->keep_res));
       !gsi_end_p (gsi); gsi_next (&gsi))
    if (gsi_stmt (gsi) == red->keep_res)
      {
	remove_phi_node (&gsi, false);
	return 1;
      }
  /* KEEP_RES must be among the phis of its own block.  */
  gcc_unreachable ();
}
/* Load the reduction result that was stored in LD_ST_DATA.
   REDUCTION_LIST describes the list of reductions that the
   loads should be generated for.  */
static void
create_final_loads_for_reduction (htab_t reduction_list,
				  struct clsn_data *ld_st_data)
{
  gimple_stmt_iterator gsi;
  tree t;
  gimple stmt;
  /* Rebind the load pointer to the address of the shared structure in
     the block after the threads have joined, then load each reduction's
     final value through it.  */
  gsi = gsi_after_labels (ld_st_data->load_bb);
  t = build_fold_addr_expr (ld_st_data->store);
  stmt = gimple_build_assign (ld_st_data->load, t);
  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
  SSA_NAME_DEF_STMT (ld_st_data->load) = stmt;
  htab_traverse (reduction_list, create_loads_for_reductions, ld_st_data);
}
/* Callback for htab_traverse.  Store the reduction's initial value (the
   value the reduction variable had before the loop, saved by
   initialize_reductions in INITIAL_VALUE) into the reduction field of
   the shared structure, so it is folded into the final result.  The
   reduction is specified in SLOT.  The store information is
   passed in DATA.  */
static int
create_stores_for_reduction (void **slot, void *data)
{
  struct reduction_info *const red = (struct reduction_info *) *slot;
  struct clsn_data *const clsn_data = (struct clsn_data *) data;
  tree t;
  gimple stmt;
  gimple_stmt_iterator gsi;
  tree type = TREE_TYPE (gimple_assign_lhs (red->reduc_stmt));
  gsi = gsi_last_bb (clsn_data->store_bb);
  t = build3 (COMPONENT_REF, type, clsn_data->store, red->field, NULL_TREE);
  /* Note: this stores INITIAL_VALUE, not the operation's neutral element;
     the neutral element was substituted into the loop phi instead.  */
  stmt = gimple_build_assign (t, red->initial_value);
  mark_virtual_ops_for_renaming (stmt);
  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
  return 1;
}
/* Callback for htab_traverse.  Creates loads to a field of LOAD in LOAD_BB and
   store to a field of STORE in STORE_BB for the ssa name and its duplicate
   specified in SLOT.  */
static int
create_loads_and_stores_for_name (void **slot, void *data)
{
  struct name_to_copy_elt *const elt = (struct name_to_copy_elt *) *slot;
  struct clsn_data *const clsn_data = (struct clsn_data *) data;
  tree t;
  gimple stmt;
  gimple_stmt_iterator gsi;
  tree type = TREE_TYPE (elt->new_name);
  tree struct_type = TREE_TYPE (TREE_TYPE (clsn_data->load));
  tree load_struct;
  /* Before the region: store the original ssa name's value into its
     field of the shared structure.  */
  gsi = gsi_last_bb (clsn_data->store_bb);
  t = build3 (COMPONENT_REF, type, clsn_data->store, elt->field, NULL_TREE);
  stmt = gimple_build_assign (t, ssa_name (elt->version));
  mark_virtual_ops_for_renaming (stmt);
  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
  /* Inside the region: load the value back through the pointer into the
     duplicated ssa name that the region's statements now use.  */
  gsi = gsi_last_bb (clsn_data->load_bb);
  load_struct = fold_build1 (INDIRECT_REF, struct_type, clsn_data->load);
  t = build3 (COMPONENT_REF, type, load_struct, elt->field, NULL_TREE);
  stmt = gimple_build_assign (elt->new_name, t);
  SSA_NAME_DEF_STMT (elt->new_name) = stmt;
  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
  return 1;
}
/* Moves all the variables used in LOOP and defined outside of it (including
   the initial values of loop phi nodes, and *PER_THREAD if it is a ssa
   name) to a structure created for this purpose.  The code
   while (1)
     {
       use (a);
       use (b);
     }
   is transformed this way:
   bb0:
   old.a = a;
   old.b = b;
   bb1:
   a' = new->a;
   b' = new->b;
   while (1)
     {
       use (a');
       use (b');
     }
   `old' is stored to *ARG_STRUCT and `new' is stored to NEW_ARG_STRUCT.  The
   pointer `new' is intentionally not initialized (the loop will be split to a
   separate function later, and `new' will be initialized from its arguments).
   LD_ST_DATA holds information about the shared data structure used to pass
   information among the threads.  It is initialized here, and
   gen_parallel_loop will pass it to create_call_for_reduction that
   needs this information.  REDUCTION_LIST describes the reductions
   in LOOP.  */
static void
separate_decls_in_region (edge entry, edge exit, htab_t reduction_list,
			  tree *arg_struct, tree *new_arg_struct,
			  struct clsn_data *ld_st_data)
{
  /* BB1 becomes the block where the loads are emitted; BB0, its single
     predecessor, gets the stores.  */
  basic_block bb1 = split_edge (entry);
  basic_block bb0 = single_pred (bb1);
  htab_t name_copies = htab_create (10, name_to_copy_elt_hash,
				    name_to_copy_elt_eq, free);
  htab_t decl_copies = htab_create (10, int_tree_map_hash, int_tree_map_eq,
				    free);
  unsigned i;
  tree type, type_name, nvar;
  gimple_stmt_iterator gsi;
  struct clsn_data clsn_data;
  VEC (basic_block, heap) *body = VEC_alloc (basic_block, heap, 3);
  basic_block bb;
  basic_block entry_bb = bb1;
  basic_block exit_bb = exit->dest;
  entry = single_succ_edge (entry_bb);
  gather_blocks_in_sese_region (entry_bb, exit_bb, &body);
  /* Pass 1: rewrite every phi and statement in the region, collecting
     the names that need to cross the region boundary.  */
  for (i = 0; VEC_iterate (basic_block, body, i, bb); i++)
    {
      if (bb != entry_bb && bb != exit_bb)
	{
	  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	    separate_decls_in_region_stmt (entry, exit, gsi_stmt (gsi),
					   name_copies, decl_copies);
	  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	    separate_decls_in_region_stmt (entry, exit, gsi_stmt (gsi),
					   name_copies, decl_copies);
	}
    }
  VEC_free (basic_block, heap, body);
  if (htab_elements (name_copies) == 0 && reduction_list == 0)
    {
      /* It may happen that there is nothing to copy (if there are only
         loop carried and external variables in the loop).  */
      *arg_struct = NULL;
      *new_arg_struct = NULL;
    }
  else
    {
      /* Pass 2: build the communication structure, one field per crossing
         name and per reduction.  */
      /* Create the type for the structure to store the ssa names to.  */
      type = lang_hooks.types.make_type (RECORD_TYPE);
      type_name = build_decl (TYPE_DECL, create_tmp_var_name (".paral_data"),
			      type);
      TYPE_NAME (type) = type_name;
      htab_traverse (name_copies, add_field_for_name, type);
      if (reduction_list && htab_elements (reduction_list) > 0)
	{
	  /* Create the fields for reductions.  */
	  htab_traverse (reduction_list, add_field_for_reduction,
			 type);
	}
      layout_type (type);
      /* Create the loads and stores.  */
      *arg_struct = create_tmp_var (type, ".paral_data_store");
      add_referenced_var (*arg_struct);
      nvar = create_tmp_var (build_pointer_type (type), ".paral_data_load");
      add_referenced_var (nvar);
      *new_arg_struct = make_ssa_name (nvar, NULL);
      ld_st_data->store = *arg_struct;
      ld_st_data->load = *new_arg_struct;
      ld_st_data->store_bb = bb0;
      ld_st_data->load_bb = bb1;
      htab_traverse (name_copies, create_loads_and_stores_for_name,
		     ld_st_data);
      /* Load the calculation from memory (after the join of the threads).  */
      if (reduction_list && htab_elements (reduction_list) > 0)
	{
	  htab_traverse (reduction_list, create_stores_for_reduction,
			 ld_st_data);
	  clsn_data.load = make_ssa_name (nvar, NULL);
	  clsn_data.load_bb = exit->dest;
	  clsn_data.store = ld_st_data->store;
	  create_final_loads_for_reduction (reduction_list, &clsn_data);
	}
    }
  htab_delete (decl_copies);
  htab_delete (name_copies);
}
/* Bitmap containing uids of functions created by parallelization.  We cannot
   allocate it from the default obstack, as it must live across compilation
   of several functions; we make it gc allocated instead.  Allocated lazily
   by create_loop_fn, queried by parallelized_function_p.  */
static GTY(()) bitmap parallelized_functions;
/* Returns true if FN was created by create_loop_fn.  */
static bool
parallelized_function_p (tree fn)
{
  /* Only artificial functions recorded in the bitmap qualify; an empty
     (unallocated) bitmap means nothing was parallelized yet.  */
  return (parallelized_functions != NULL
	  && DECL_ARTIFICIAL (fn)
	  && bitmap_bit_p (parallelized_functions, DECL_UID (fn)));
}
/* Creates and returns an empty function that will receive the body of
   a parallelized loop.  */
static tree
create_loop_fn (void)
{
  char buf[100];
  char *tname;
  tree decl, type, name, t;
  struct function *act_cfun = cfun;
  static unsigned loopfn_num;
  /* Build a unique private name of the form <function>.$loopfn.<N>.  */
  snprintf (buf, 100, "%s.$loopfn", current_function_name ());
  ASM_FORMAT_PRIVATE_NAME (tname, buf, loopfn_num++);
  clean_symbol_name (tname);
  name = get_identifier (tname);
  /* The outlined function takes a single pointer argument (the shared
     data structure) and returns nothing.  */
  type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
  decl = build_decl (FUNCTION_DECL, name, type);
  if (!parallelized_functions)
    parallelized_functions = BITMAP_GGC_ALLOC ();
  bitmap_set_bit (parallelized_functions, DECL_UID (decl));
  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  /* Inlining it back would defeat the outlining.  */
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);
  /* void result.  */
  t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_RESULT (decl) = t;
  /* The single parameter through which the shared data is passed.  */
  t = build_decl (PARM_DECL, get_identifier (".paral_data_param"),
		  ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  allocate_struct_function (decl, false);
  /* The call to allocate_struct_function clobbers CFUN, so we need to restore
     it.  */
  set_cfun (act_cfun);
  return decl;
}
/* Bases all the induction variables in LOOP on a single induction
   variable (unsigned with base 0 and step 1), whose final value is
   compared with *NIT.  When the IV type precision has to be larger
   than *NIT type precision, *NIT is converted to the larger type, the
   conversion code is inserted before the loop, and *NIT is updated to
   the new definition.  The induction variable is incremented in the
   loop latch.  REDUCTION_LIST describes the reductions in LOOP.
   Return the induction variable that was created.  */
tree
canonicalize_loop_ivs (struct loop *loop, htab_t reduction_list, tree *nit)
{
  unsigned precision = TYPE_PRECISION (TREE_TYPE (*nit));
  unsigned original_precision = precision;
  tree res, type, var_before, val, atype, mtype;
  gimple_stmt_iterator gsi, psi;
  gimple phi, stmt;
  bool ok;
  affine_iv iv;
  edge exit = single_dom_exit (loop);
  struct reduction_info *red;
  gimple_seq stmts;
  /* Find the widest precision among the header phi results, so the new
     canonical IV can represent all of them.  */
  for (psi = gsi_start_phis (loop->header);
       !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = gsi_stmt (psi);
      res = PHI_RESULT (phi);
      if (is_gimple_reg (res) && TYPE_PRECISION (TREE_TYPE (res)) > precision)
	precision = TYPE_PRECISION (TREE_TYPE (res));
    }
  type = lang_hooks.types.type_for_size (precision, 1);
  /* Widen *NIT to the chosen type if needed, gimplifying on the
     preheader edge.  */
  if (original_precision != precision)
    {
      *nit = fold_convert (type, *nit);
      *nit = force_gimple_operand (*nit, &stmts, true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
    }
  /* Create the canonical IV: base 0, step 1, incremented in the latch.  */
  gsi = gsi_last_bb (loop->latch);
  create_iv (build_int_cst_type (type, 0), build_int_cst (type, 1), NULL_TREE,
	     loop, &gsi, true, &var_before, NULL);
  /* Replace every other IV phi by an explicit computation
     base + step * canonical_iv placed at the top of the header.  */
  gsi = gsi_after_labels (loop->header);
  for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); )
    {
      phi = gsi_stmt (psi);
      res = PHI_RESULT (phi);
      if (!is_gimple_reg (res) || res == var_before)
	{
	  gsi_next (&psi);
	  continue;
	}
      ok = simple_iv (loop, loop, res, &iv, true);
      if (reduction_list)
	red = reduction_phi (reduction_list, phi);
      else
	red = NULL;
      /* We preserve the reduction phi nodes.  */
      if (!ok && red)
	{
	  gsi_next (&psi);
	  continue;
	}
      else
	gcc_assert (ok);
      remove_phi_node (&psi, false);
      atype = TREE_TYPE (res);
      /* Pointer IVs step in sizetype and combine with POINTER_PLUS_EXPR.  */
      mtype = POINTER_TYPE_P (atype) ? sizetype : atype;
      val = fold_build2 (MULT_EXPR, mtype, unshare_expr (iv.step),
			 fold_convert (mtype, var_before));
      val = fold_build2 (POINTER_TYPE_P (atype)
			 ? POINTER_PLUS_EXPR : PLUS_EXPR,
			 atype, unshare_expr (iv.base), val);
      val = force_gimple_operand_gsi (&gsi, val, false, NULL_TREE, true,
				      GSI_SAME_STMT);
      stmt = gimple_build_assign (res, val);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
      SSA_NAME_DEF_STMT (res) = stmt;
    }
  /* Rewrite the exit condition as canonical_iv < *NIT, flipping the
     true/false edges if the loop originally exited on the true edge.  */
  stmt = last_stmt (exit->src);
  /* Make the loop exit if the control condition is not satisfied.  */
  if (exit->flags & EDGE_TRUE_VALUE)
    {
      edge te, fe;
      extract_true_false_edges_from_block (exit->src, &te, &fe);
      te->flags = EDGE_FALSE_VALUE;
      fe->flags = EDGE_TRUE_VALUE;
    }
  gimple_cond_set_code (stmt, LT_EXPR);
  gimple_cond_set_lhs (stmt, var_before);
  gimple_cond_set_rhs (stmt, *nit);
  update_stmt (stmt);
  return var_before;
}
/* Moves the exit condition of LOOP to the beginning of its header, and
   duplicates the part of the last iteration that gets disabled to the
   exit of the loop.  NIT is the number of iterations of the loop
   (used to initialize the variables in the duplicated part).
   TODO: the common case is that latch of the loop is empty and immediately
   follows the loop exit.  In this case, it would be better not to copy the
   body of the loop, but only move the entry of the loop directly before the
   exit check and increase the number of iterations of the loop by one.
   This may need some additional preconditioning in case NIT = ~0.
   REDUCTION_LIST describes the reductions in LOOP.  */
static void
transform_to_exit_first_loop (struct loop *loop, htab_t reduction_list, tree nit)
{
  basic_block *bbs, *nbbs, ex_bb, orig_header;
  unsigned n;
  bool ok;
  edge exit = single_dom_exit (loop), hpred;
  tree control, control_name, res, t;
  gimple phi, nphi, cond_stmt, stmt;
  gimple_stmt_iterator gsi;

  /* Split the header after its labels; the old header contents end up in
     ORIG_HEADER, reached through the single edge HPRED.  */
  split_block_after_labels (loop->header);
  orig_header = single_succ (loop->header);
  hpred = single_succ_edge (loop->header);

  /* CONTROL is the canonical IV that the exit condition compares against
     NIT (as set up by canonicalize_loop_ivs).  */
  cond_stmt = last_stmt (exit->src);
  control = gimple_cond_lhs (cond_stmt);
  gcc_assert (gimple_cond_rhs (cond_stmt) == nit);

  /* Make sure that we have phi nodes on exit for all loop header phis
     (create_parallel_loop requires that).  */
  for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      phi = gsi_stmt (gsi);
      res = PHI_RESULT (phi);

      /* Rename the header phi result to a fresh name T, and let the old
         name RES be defined by a new phi in ORIG_HEADER fed by T.  */
      t = make_ssa_name (SSA_NAME_VAR (res), phi);
      SET_PHI_RESULT (phi, t);
      nphi = create_phi_node (res, orig_header);
      SSA_NAME_DEF_STMT (res) = nphi;
      add_phi_arg (nphi, t, hpred);

      /* The exit condition must now test the renamed control variable.  */
      if (res == control)
        {
          gimple_cond_set_lhs (cond_stmt, t);
          update_stmt (cond_stmt);
          control = t;
        }
    }

  /* Duplicate the blocks of the loop body that are dominated by the exit
     block to after the exit -- the tail of the disabled last iteration.
     N counts the blocks that precede EXIT->src in dominator order.  */
  bbs = get_loop_body_in_dom_order (loop);
  for (n = 0; bbs[n] != exit->src; n++)
    continue;
  nbbs = XNEWVEC (basic_block, n);
  ok = gimple_duplicate_sese_tail (single_succ_edge (loop->header), exit,
                                   bbs + 1, n, nbbs);
  gcc_assert (ok);
  free (bbs);
  ex_bb = nbbs[0];
  free (nbbs);

  /* Other than reductions, the only gimple reg that should be copied
     out of the loop is the control variable.  */
  control_name = NULL_TREE;
  for (gsi = gsi_start_phis (ex_bb); !gsi_end_p (gsi); )
    {
      phi = gsi_stmt (gsi);
      res = PHI_RESULT (phi);

      /* Keep non-register (virtual) phis untouched.  */
      if (!is_gimple_reg (res))
        {
          gsi_next (&gsi);
          continue;
        }

      /* Check if it is a part of reduction.  If it is,
         keep the phi at the reduction's keep_res field.  The
         PHI_RESULT of this phi is the resulting value of the reduction
         variable when exiting the loop.  */
      exit = single_dom_exit (loop);
      if (htab_elements (reduction_list) > 0)
        {
          struct reduction_info *red;
          tree val = PHI_ARG_DEF_FROM_EDGE (phi, exit);
          red = reduction_phi (reduction_list, SSA_NAME_DEF_STMT (val));
          if (red)
            {
              red->keep_res = phi;
              gsi_next (&gsi);
              continue;
            }
        }

      /* Anything else must be the control variable; record and drop it.  */
      gcc_assert (control_name == NULL_TREE
                  && SSA_NAME_VAR (res) == SSA_NAME_VAR (control));
      control_name = res;
      remove_phi_node (&gsi, false);
    }
  gcc_assert (control_name != NULL_TREE);

  /* Initialize the control variable to NIT.  */
  gsi = gsi_after_labels (ex_bb);
  nit = force_gimple_operand_gsi (&gsi,
                                  fold_convert (TREE_TYPE (control_name), nit),
                                  false, NULL_TREE, false, GSI_SAME_STMT);
  stmt = gimple_build_assign (control_name, nit);
  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
  SSA_NAME_DEF_STMT (control_name) = stmt;
}
/* Create the parallel constructs for LOOP as described in gen_parallel_loop.
   LOOP_FN and DATA are the arguments of GIMPLE_OMP_PARALLEL.
   NEW_DATA is the variable that should be initialized from the argument
   of LOOP_FN.  N_THREADS is the requested number of threads.  Returns the
   basic block containing GIMPLE_OMP_PARALLEL tree.  */
static basic_block
create_parallel_loop (struct loop *loop, tree loop_fn, tree data,
                      tree new_data, unsigned n_threads)
{
  gimple_stmt_iterator gsi;
  basic_block bb, paral_bb, for_bb, ex_bb;
  tree t, param, res;
  gimple stmt, for_stmt, phi, cond_stmt;
  tree cvar, cvar_init, initvar, cvar_next, cvar_base, type;
  edge exit, nexit, guard, end, e;

  /* Prepare the GIMPLE_OMP_PARALLEL statement, carrying a
     num_threads (N_THREADS) clause.  */
  bb = loop_preheader_edge (loop)->src;
  paral_bb = single_pred (bb);
  gsi = gsi_last_bb (paral_bb);
  t = build_omp_clause (OMP_CLAUSE_NUM_THREADS);
  OMP_CLAUSE_NUM_THREADS_EXPR (t)
    = build_int_cst (integer_type_node, n_threads);
  stmt = gimple_build_omp_parallel (NULL, t, loop_fn, data);
  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);

  /* Initialize NEW_DATA from &DATA through LOOP_FN's only parameter.  */
  if (data)
    {
      gsi = gsi_after_labels (bb);
      param = make_ssa_name (DECL_ARGUMENTS (loop_fn), NULL);
      stmt = gimple_build_assign (param, build_fold_addr_expr (data));
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
      SSA_NAME_DEF_STMT (param) = stmt;
      stmt = gimple_build_assign (new_data,
                                  fold_convert (TREE_TYPE (new_data), param));
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
      SSA_NAME_DEF_STMT (new_data) = stmt;
    }

  /* Emit GIMPLE_OMP_RETURN for GIMPLE_OMP_PARALLEL.  */
  bb = split_loop_exit_edge (single_dom_exit (loop));
  gsi = gsi_last_bb (bb);
  gsi_insert_after (&gsi, gimple_build_omp_return (false), GSI_NEW_STMT);

  /* Extract data for GIMPLE_OMP_FOR: the control variable CVAR, its
     initial value, and its next-iteration value, taken from the exit
     condition and the loop-header phi node.  */
  gcc_assert (loop->header == single_dom_exit (loop)->src);
  cond_stmt = last_stmt (loop->header);
  cvar = gimple_cond_lhs (cond_stmt);
  cvar_base = SSA_NAME_VAR (cvar);
  phi = SSA_NAME_DEF_STMT (cvar);
  cvar_init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
  initvar = make_ssa_name (cvar_base, NULL);
  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, loop_preheader_edge (loop)),
           initvar);
  cvar_next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));

  /* The IV increment in the latch is superseded by the
     GIMPLE_OMP_CONTINUE emitted below.  */
  gsi = gsi_last_bb (loop->latch);
  gcc_assert (gsi_stmt (gsi) == SSA_NAME_DEF_STMT (cvar_next));
  gsi_remove (&gsi, true);

  /* Prepare cfg.  GUARD skips the loop entirely; END leaves it after the
     last iteration.  Phis in EX_BB receive matching arguments.  */
  for_bb = split_edge (loop_preheader_edge (loop));
  ex_bb = split_loop_exit_edge (single_dom_exit (loop));
  extract_true_false_edges_from_block (loop->header, &nexit, &exit);
  gcc_assert (exit == single_dom_exit (loop));
  guard = make_edge (for_bb, ex_bb, 0);
  single_succ_edge (loop->latch)->flags = 0;
  end = make_edge (loop->latch, ex_bb, EDGE_FALLTHRU);
  for (gsi = gsi_start_phis (ex_bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      phi = gsi_stmt (gsi);
      res = PHI_RESULT (phi);
      stmt = SSA_NAME_DEF_STMT (PHI_ARG_DEF_FROM_EDGE (phi, exit));
      add_phi_arg (phi,
                   PHI_ARG_DEF_FROM_EDGE (stmt, loop_preheader_edge (loop)),
                   guard);
      add_phi_arg (phi, PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (loop)),
                   end);
    }
  e = redirect_edge_and_branch (exit, nexit->dest);
  PENDING_STMT (e) = NULL;

  /* Emit GIMPLE_OMP_FOR with a schedule (static) clause, reusing the
     condition code and bound from the original exit test.  */
  gimple_cond_set_lhs (cond_stmt, cvar_base);
  type = TREE_TYPE (cvar);
  t = build_omp_clause (OMP_CLAUSE_SCHEDULE);
  OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_STATIC;
  for_stmt = gimple_build_omp_for (NULL, t, 1, NULL);
  gimple_omp_for_set_index (for_stmt, 0, initvar);
  gimple_omp_for_set_initial (for_stmt, 0, cvar_init);
  gimple_omp_for_set_final (for_stmt, 0, gimple_cond_rhs (cond_stmt));
  gimple_omp_for_set_cond (for_stmt, 0, gimple_cond_code (cond_stmt));
  gimple_omp_for_set_incr (for_stmt, 0, build2 (PLUS_EXPR, type,
                                                cvar_base,
                                                build_int_cst (type, 1)));
  gsi = gsi_last_bb (for_bb);
  gsi_insert_after (&gsi, for_stmt, GSI_NEW_STMT);
  SSA_NAME_DEF_STMT (initvar) = for_stmt;

  /* Emit GIMPLE_OMP_CONTINUE.  */
  gsi = gsi_last_bb (loop->latch);
  stmt = gimple_build_omp_continue (cvar_next, cvar);
  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
  SSA_NAME_DEF_STMT (cvar_next) = stmt;

  /* Emit GIMPLE_OMP_RETURN for GIMPLE_OMP_FOR.  */
  gsi = gsi_last_bb (ex_bb);
  gsi_insert_after (&gsi, gimple_build_omp_return (true), GSI_NEW_STMT);
  return paral_bb;
}
/* Generates code to execute the iterations of LOOP in N_THREADS threads in
   parallel.  NITER describes number of iterations of LOOP.
   REDUCTION_LIST describes the reductions existent in the LOOP.  */
static void
gen_parallel_loop (struct loop *loop, htab_t reduction_list,
                   unsigned n_threads, struct tree_niter_desc *niter)
{
  struct loop *nloop;
  loop_iterator li;
  tree many_iterations_cond, type, nit;
  tree arg_struct, new_arg_struct;
  gimple_seq stmts;
  basic_block parallel_head;
  edge entry, exit;
  struct clsn_data clsn_data;
  unsigned prob;

  /* From
     ---------------------------------------------------------------------
     loop
       {
	 IV = phi (INIT, IV + STEP)
	 BODY1;
	 if (COND)
	   break;
	 BODY2;
       }
     ---------------------------------------------------------------------
     with # of iterations NITER (possibly with MAY_BE_ZERO assumption),
     we generate the following code:
     ---------------------------------------------------------------------
     if (MAY_BE_ZERO
	 || NITER < MIN_PER_THREAD * N_THREADS)
       goto original;
     BODY1;
     store all local loop-invariant variables used in body of the loop to DATA.
     GIMPLE_OMP_PARALLEL (OMP_CLAUSE_NUM_THREADS (N_THREADS), LOOPFN, DATA);
     load the variables from DATA.
     GIMPLE_OMP_FOR (IV = INIT; COND; IV += STEP) (OMP_CLAUSE_SCHEDULE (static))
     BODY2;
     BODY1;
     GIMPLE_OMP_CONTINUE;
     GIMPLE_OMP_RETURN         -- GIMPLE_OMP_FOR
     GIMPLE_OMP_RETURN         -- GIMPLE_OMP_PARALLEL
     goto end;
     original:
     loop
       {
	 IV = phi (INIT, IV + STEP)
	 BODY1;
	 if (COND)
	   break;
	 BODY2;
       }
     end:
   */

  /* Create two versions of the loop -- in the old one, we know that the
     number of iterations is large enough, and we will transform it into the
     loop that will be split to loop_fn, the new one will be used for the
     remaining iterations.  */
  type = TREE_TYPE (niter->niter);
  nit = force_gimple_operand (unshare_expr (niter->niter), &stmts, true,
                              NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

  /* Versioning guard: NITER >= MIN_PER_THREAD * N_THREADS and the
     MAY_BE_ZERO assumption does not hold.  */
  many_iterations_cond =
    fold_build2 (GE_EXPR, boolean_type_node,
                 nit, build_int_cst (type, MIN_PER_THREAD * n_threads));
  many_iterations_cond
    = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                   invert_truthvalue (unshare_expr (niter->may_be_zero)),
                   many_iterations_cond);
  many_iterations_cond
    = force_gimple_operand (many_iterations_cond, &stmts, false, NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
  if (!is_gimple_condexpr (many_iterations_cond))
    {
      many_iterations_cond
        = force_gimple_operand (many_iterations_cond, &stmts,
                                true, NULL_TREE);
      if (stmts)
        gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
    }

  initialize_original_copy_tables ();

  /* We assume that the loop usually iterates a lot.  */
  prob = 4 * REG_BR_PROB_BASE / 5;
  nloop = loop_version (loop, many_iterations_cond, NULL,
                        prob, prob, REG_BR_PROB_BASE - prob, true);
  update_ssa (TODO_update_ssa);
  free_original_copy_tables ();

  /* Base all the induction variables in LOOP on a single control one.  */
  canonicalize_loop_ivs (loop, reduction_list, &nit);

  /* Ensure that the exit condition is the first statement in the loop.  */
  transform_to_exit_first_loop (loop, reduction_list, nit);

  /* Generate initializations for reductions.  */
  if (htab_elements (reduction_list) > 0)
    htab_traverse (reduction_list, initialize_reductions, loop);

  /* Eliminate the references to local variables from the loop.  */
  gcc_assert (single_exit (loop));
  entry = loop_preheader_edge (loop);
  exit = single_dom_exit (loop);
  eliminate_local_variables (entry, exit);

  /* In the old loop, move all variables non-local to the loop to a structure
     and back, and create separate decls for the variables used in loop.  */
  separate_decls_in_region (entry, exit, reduction_list, &arg_struct,
                            &new_arg_struct, &clsn_data);

  /* Create the parallel constructs.  */
  parallel_head = create_parallel_loop (loop, create_loop_fn (), arg_struct,
                                        new_arg_struct, n_threads);
  if (htab_elements (reduction_list) > 0)
    create_call_for_reduction (loop, reduction_list, &clsn_data);

  /* SCEV caches may refer to statements the transform removed.  */
  scev_reset ();

  /* Cancel the loop (it is simpler to do it here rather than to teach the
     expander to do it).  */
  cancel_loop_tree (loop);

  /* Free loop bound estimations that could contain references to
     removed statements.  */
  FOR_EACH_LOOP (li, loop, 0)
    free_numbers_of_iterations_estimates_loop (loop);

  /* Expand the parallel constructs.  We do it directly here instead of running
     a separate expand_omp pass, since it is more efficient, and less likely to
     cause troubles with further analyses not being able to deal with the
     OMP trees.  */
  omp_expand_local (parallel_head);
}
/* Returns true when LOOP contains vector phi nodes. */
static bool
loop_has_vector_phi_nodes (struct loop *loop ATTRIBUTE_UNUSED)
{
unsigned i;
basic_block *bbs = get_loop_body_in_dom_order (loop);
gimple_stmt_iterator gsi;
bool res = true;
for (i = 0; i < loop->num_nodes; i++)
for (gsi = gsi_start_phis (bbs[i]); !gsi_end_p (gsi); gsi_next (&gsi))
if (TREE_CODE (TREE_TYPE (PHI_RESULT (gsi_stmt (gsi)))) == VECTOR_TYPE)
goto end;
res = false;
end:
free (bbs);
return res;
}
/* Detect parallel loops and generate parallel code using libgomp
   primitives.  Returns true if some loop was parallelized, false
   otherwise.  */
bool
parallelize_loops (void)
{
  unsigned n_threads = flag_tree_parallelize_loops;
  bool changed = false;
  struct loop *loop;
  struct tree_niter_desc niter_desc;
  loop_iterator li;
  htab_t reduction_list;

  /* Do not parallelize loops in the functions created by parallelization.  */
  if (parallelized_function_p (cfun->decl))
    return false;

  /* The table owns its reduction_info entries: free is the deleter.  */
  reduction_list = htab_create (10, reduction_info_hash,
                                reduction_info_eq, free);
  init_stmt_vec_info_vec ();

  FOR_EACH_LOOP (li, loop, 0)
    {
      /* Reduction info is recomputed per loop.  */
      htab_empty (reduction_list);
      if (/* Do not bother with loops in cold areas.  */
          optimize_loop_nest_for_size_p (loop)
          /* Or loops that roll too little.  */
          || expected_loop_iterations (loop) <= n_threads
          /* And of course, the loop must be parallelizable.  */
          || !can_duplicate_loop_p (loop)
          || loop_has_blocks_with_irreducible_flag (loop)
          /* FIXME: the check for vector phi nodes could be removed.  */
          || loop_has_vector_phi_nodes (loop)
          || !loop_parallel_p (loop, reduction_list, &niter_desc))
        continue;
      changed = true;
      gen_parallel_loop (loop, reduction_list, n_threads, &niter_desc);

      /* Verify IL consistency after each transformed loop.  */
      verify_flow_info ();
      verify_dominators (CDI_DOMINATORS);
      verify_loop_structure ();
      verify_loop_closed_ssa ();
    }

  free_stmt_vec_info_vec ();
  htab_delete (reduction_list);
  return changed;
}
#include "gt-tree-parloops.h"
|
ast-dump-openmp-teams-distribute-parallel-for-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { // single loop: baseline AST shape for the combined directive
#pragma omp target
#pragma omp teams distribute parallel for simd
  for (int i = 0; i < x; i++)
    ;
}
void test_two(int x, int y) { // two nested loops, no collapse clause
#pragma omp target
#pragma omp teams distribute parallel for simd
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_three(int x, int y) { // explicit collapse(1) on the same nest as test_two
#pragma omp target
#pragma omp teams distribute parallel for simd collapse(1)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_four(int x, int y) { // collapse(2): both loops of the nest are associated
#pragma omp target
#pragma omp teams distribute parallel for simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_five(int x, int y, int z) { // collapse(2) with an extra innermost loop beyond the collapse depth
#pragma omp target
#pragma omp teams distribute parallel for simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      for (int i = 0; i < z; i++)
        ;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:3:1, line:8:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:8:1>
// CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:4:1, col:19>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:6:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:1, col:47>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:47>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <col:1, col:47>
// CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict'
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <col:3, line:7:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:5:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <col:3, line:7:5>
// CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict'
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict'
// CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <col:3, line:7:5>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict'
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <col:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <line:5:1, col:47>
// CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:7:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <line:5:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &'
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <col:3, line:7:5>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict'
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <col:3, line:7:5>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict'
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <col:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:10:1, line:16:1> line:10:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:16:1>
// CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:11:1, col:19>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:12:1, col:47>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:47>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <col:1, col:47>
// CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict'
// CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:11:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:12:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict'
// CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict'
// CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:13:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:11:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <line:12:1, col:47>
// CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:11:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <line:12:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &'
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:13:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:18:1, line:24:1> line:18:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:24:1>
// CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:19:1, col:19>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:20:1, col:59>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:59>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <col:1, col:59>
// CHECK-NEXT: | | | | | |-OMPCollapseClause {{.*}} <col:48, col:58>
// CHECK-NEXT: | | | | | | `-ConstantExpr {{.*}} <col:57> 'int'
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:57> 'int' 1
// CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict'
// CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:19:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:20:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict'
// CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict'
// CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:21:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:19:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <line:20:1, col:59>
// CHECK-NEXT: | | | |-OMPCollapseClause {{.*}} <col:48, col:58>
// CHECK-NEXT: | | | | `-ConstantExpr {{.*}} <col:57> 'int'
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:57> 'int' 1
// CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:19:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <line:20:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &'
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:21:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:26:1, line:32:1> line:26:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:32:1>
// CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:27:1, col:19>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:28:1, col:59>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:59>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <col:1, col:59>
// CHECK-NEXT: | | | | | |-OMPCollapseClause {{.*}} <col:48, col:58>
// CHECK-NEXT: | | | | | | `-ConstantExpr {{.*}} <col:57> 'int'
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:57> 'int' 2
// CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict'
// CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:27:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:28:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict'
// CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict'
// CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:29:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:30:25> col:25 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <line:29:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, line:30:28> 'long' '*'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <line:29:3, col:26> 'long' <IntegralCast>
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+'
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <line:30:5, col:28> 'long' <IntegralCast>
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:25, col:28> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:25, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:25, col:18> 'int' '-'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast>
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:27:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <line:28:1, col:59>
// CHECK-NEXT: | | | |-OMPCollapseClause {{.*}} <col:48, col:58>
// CHECK-NEXT: | | | | `-ConstantExpr {{.*}} <col:57> 'int'
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:57> 'int' 2
// CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:27:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <line:28:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &'
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:29:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:30:25> col:25 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <line:29:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long'
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, line:30:28> 'long' '*'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <line:29:3, col:26> 'long' <IntegralCast>
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+'
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <line:30:5, col:28> 'long' <IntegralCast>
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:25, col:28> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:25, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:25, col:18> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast>
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:34:1, line:41:1> line:34:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:41:1>
// CHECK-NEXT: `-OMPTargetDirective {{.*}} <line:35:1, col:19>
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:36:1, col:59>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <col:1, col:59>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <col:1, col:59>
// CHECK-NEXT: | | | | |-OMPCollapseClause {{.*}} <col:48, col:58>
// CHECK-NEXT: | | | | | `-ConstantExpr {{.*}} <col:57> 'int'
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:57> 'int' 2
// CHECK-NEXT: | | | | `-CapturedStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict'
// CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict'
// CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &'
// CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:35:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <line:36:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &'
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &'
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | |-OMPCapturedExprDecl {{.*}} <line:37:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-OMPCapturedExprDecl {{.*}} <line:38:25> col:25 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | `-OMPCapturedExprDecl {{.*}} <line:37:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long'
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-'
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:3, line:38:28> 'long' '*'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <line:37:3, col:26> 'long' <IntegralCast>
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <line:38:5, col:28> 'long' <IntegralCast>
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/'
// CHECK-NEXT: | | | | |-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:25, col:28> 'int' '+'
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:25, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:25, col:18> 'int' '-'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast>
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:35:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <line:36:1, col:59>
// CHECK-NEXT: | | |-OMPCollapseClause {{.*}} <col:48, col:58>
// CHECK-NEXT: | | | `-ConstantExpr {{.*}} <col:57> 'int'
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:57> 'int' 2
// CHECK-NEXT: | | `-CapturedStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &'
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:35:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <line:36:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &'
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &'
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &'
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &'
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &'
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | |-OMPCapturedExprDecl {{.*}} <line:37:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-OMPCapturedExprDecl {{.*}} <line:38:25> col:25 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-OMPCapturedExprDecl {{.*}} <line:37:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long'
// CHECK-NEXT: | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-'
// CHECK-NEXT: | |-BinaryOperator {{.*}} <col:3, line:38:28> 'long' '*'
// CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} <line:37:3, col:26> 'long' <IntegralCast>
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <line:38:5, col:28> 'long' <IntegralCast>
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/'
// CHECK-NEXT: | | |-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:25, col:28> 'int' '+'
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:25, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:25, col:18> 'int' '-'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast>
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Compute *result = *x - *y for two timevals.
   *
   * NOTE: *y is normalized in place (borrow/carry folded into it), exactly
   * as the classic glibc manual example does; callers must not rely on the
   * value of *y afterwards.
   *
   * Returns 1 if the difference is negative, otherwise 0. */

  /* Borrow whole seconds from y so that x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec)
    {
      int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
      y->tv_usec -= 1000000 * borrow;
      y->tv_sec += borrow;
    }

  /* Fold any excess microseconds in the gap back into whole seconds. */
  if (x->tv_usec - y->tv_usec > 1000000)
    {
      int carry = (x->tv_usec - y->tv_usec) / 1000000;
      y->tv_usec += 1000000 * carry;
      y->tv_sec -= carry;
    }

  /* The remaining microsecond difference is now non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
  int i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* All four arguments are required: the original code read Nx, Ny, Nz and
   * Nt uninitialized whenever arguments were missing, which is undefined
   * behavior. Fail fast with a usage message instead. */
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return EXIT_FAILURE;
  }
  Nx = atoi(argv[1])+8;  /* +8 leaves room for the 4-deep halo on each side */
  Ny = atoi(argv[2])+8;
  Nz = atoi(argv[3])+8;
  Nt = atoi(argv[4]);

  /* Allocate the arrays: A holds the two time levels of the grid, coef the
   * 13 axis-symmetric stencil coefficient arrays. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* Tile size information, including extra element (-1) terminating the
   * list. The list is modified here before source-to-source
   * transformations. */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 16;
  tile_size[3] = 512;
  tile_size[4] = -1;

  /* For timekeeping. */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;

  /* Initialize with a fixed seed so runs are reproducible.
   * NOTE(review): these loops start at index 1, so plane/row/column 0 is
   * left uninitialized even though the stencil's -4 offsets can reach it.
   * Kept verbatim to preserve the generated benchmark's behavior and its
   * rand() sequence — confirm upstream whether index 0 should be seeded. */
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* Run the tiled stencil TESTS times and keep the best wall-clock time. */
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;  /* CLooG boilerplate; lb/ub/lb2/ub2 unused */
    register int lbv, ubv;
/* Start of CLooG code (auto-generated time-tiled loop nest; kept verbatim) */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
  for (t1=-1;t1<=floord(Nt-1,2);t1++) {
    lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
    ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(t1-1,2)),ceild(16*t2-Nz-3,16));t3<=min(min(min(floord(4*Nt+Ny-9,16),floord(8*t1+Ny+7,16)),floord(16*t2+Ny+3,16)),floord(16*t1-16*t2+Nz+Ny+5,16));t3++) {
        for (t4=max(max(max(0,ceild(t1-63,64)),ceild(16*t2-Nz-499,512)),ceild(16*t3-Ny-499,512));t4<=min(min(min(min(floord(4*Nt+Nx-9,512),floord(8*t1+Nx+7,512)),floord(16*t2+Nx+3,512)),floord(16*t3+Nx+3,512)),floord(16*t1-16*t2+Nz+Nx+5,512));t4++) {
          for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(512*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),4*t3+2),128*t4+126);t5++) {
            for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
              for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) {
                lbv=max(512*t4,4*t5+4);
                ubv=min(512*t4+511,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) 
+ (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays. The top-level A, coef and tile_size pointers
   * were leaked by the original code; they are released here too. */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
search.h | // -*- C++ -*-
// Copyright (C) 2007-2018 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/search.h
* @brief Parallel implementation base for std::search() and
* std::search_n().
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Felix Putze.
#ifndef _GLIBCXX_PARALLEL_SEARCH_H
#define _GLIBCXX_PARALLEL_SEARCH_H 1
#include <bits/stl_algobase.h>
#include <parallel/parallel.h>
#include <parallel/equally_split.h>
namespace __gnu_parallel
{
/**
* @brief Precalculate __advances for Knuth-Morris-Pratt algorithm.
* @param __elements Begin iterator of sequence to search for.
* @param __length Length of sequence to search for.
* @param __off Returned __offsets.
*/
template<typename _RAIter, typename _DifferenceTp>
  void
  __calc_borders(_RAIter __elements, _DifferenceTp __length,
                 _DifferenceTp* __off)
  {
    typedef _DifferenceTp _DifferenceType;

    // Knuth-Morris-Pratt failure function: __off[__j] is the length of
    // the longest proper border (prefix that is also a suffix) of the
    // first __j pattern elements; __off[0] is the sentinel -1.
    __off[0] = -1;
    if (__length > 1)
      __off[1] = 0;

    _DifferenceType __border = 0;
    for (_DifferenceType __j = 2; __j <= __length; ++__j)
      {
        // Shrink the candidate border until it can be extended by the
        // element before position __j. Only operator== is required of
        // the element type, hence the !(a == b) form.
        while (__border >= 0
               && !(__elements[__border] == __elements[__j - 1]))
          __border = __off[__border];
        ++__border;
        __off[__j] = __border;
      }
  }
// Generic parallel find algorithm (requires random access iterator).
/** @brief Parallel std::search.
* @param __begin1 Begin iterator of first sequence.
* @param __end1 End iterator of first sequence.
* @param __begin2 Begin iterator of second sequence.
* @param __end2 End iterator of second sequence.
* @param __pred Find predicate.
* @return Place of finding in first sequences. */
// Parallel KMP-based search: split the candidate start positions of the
// haystack among OpenMP threads; each thread runs Knuth-Morris-Pratt over
// its slice, and the smallest matching start position wins.
template<typename __RAIter1,
typename __RAIter2,
typename _Pred>
__RAIter1
__search_template(__RAIter1 __begin1, __RAIter1 __end1,
__RAIter2 __begin2, __RAIter2 __end2,
_Pred __pred)
{
typedef std::iterator_traits<__RAIter1> _TraitsType;
typedef typename _TraitsType::difference_type _DifferenceType;
_GLIBCXX_CALL((__end1 - __begin1) + (__end2 - __begin2));
_DifferenceType __pattern_length = __end2 - __begin2;
// Pattern too short.
if(__pattern_length <= 0)
return __end1;
// Last point to start search.
_DifferenceType __input_length = (__end1 - __begin1) - __pattern_length;
// Where is first occurrence of pattern? defaults to end.
_DifferenceType __result = (__end1 - __begin1);
_DifferenceType *__splitters;
// Pattern too long.
if (__input_length < 0)
return __end1;
// __result holds the smallest start index found so far; the lock
// serializes updates to it from competing threads.
omp_lock_t __result_lock;
omp_init_lock(&__result_lock);
_ThreadIndex __num_threads = std::max<_DifferenceType>
(1, std::min<_DifferenceType>(__input_length,
__get_max_threads()));
// KMP border ("failure function") table for the pattern. This is a
// variable-length array — a GNU extension, sized by the pattern length.
_DifferenceType __advances[__pattern_length];
__calc_borders(__begin2, __pattern_length, __advances);
# pragma omp parallel num_threads(__num_threads)
{
# pragma omp single
{
// Split the start positions evenly over the number of threads the
// runtime actually granted; the implicit barrier at the end of
// 'single' publishes __splitters to all threads.
__num_threads = omp_get_num_threads();
__splitters = new _DifferenceType[__num_threads + 1];
__equally_split(__input_length, __num_threads, __splitters);
}
_ThreadIndex __iam = omp_get_thread_num();
// Half-open slice of start positions [__start, __stop] for this thread.
_DifferenceType __start = __splitters[__iam],
__stop = __splitters[__iam + 1];
_DifferenceType __pos_in_pattern = 0;
bool __found_pattern = false;
while (__start <= __stop && !__found_pattern)
{
// Get new value of result.
#pragma omp flush(__result)
// No chance for this thread to find first occurrence.
if (__result < __start)
break;
// Extend the current match element by element.
while (__pred(__begin1[__start + __pos_in_pattern],
__begin2[__pos_in_pattern]))
{
++__pos_in_pattern;
if (__pos_in_pattern == __pattern_length)
{
// Found new candidate for result.
omp_set_lock(&__result_lock);
__result = std::min(__result, __start);
omp_unset_lock(&__result_lock);
__found_pattern = true;
break;
}
}
// Make safe jump.
// KMP shift: advance the window by the mismatch position minus the
// longest border, and resume matching inside the pattern from there.
__start += (__pos_in_pattern - __advances[__pos_in_pattern]);
__pos_in_pattern = (__advances[__pos_in_pattern] < 0
? 0 : __advances[__pos_in_pattern]);
}
} //parallel
omp_destroy_lock(&__result_lock);
delete[] __splitters;
// Return iterator on found element.
return (__begin1 + __result);
}
} // end namespace
#endif /* _GLIBCXX_PARALLEL_SEARCH_H */
|
GB_unop__isinf_bool_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__isinf_bool_fp32)
// op(A') function: GB (_unop_tran__isinf_bool_fp32)
// C type: bool
// A type: float
// cast: float cij = (aij)
// unaryop: cij = isinf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = isinf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (aij) ; \
Cx [pC] = isinf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISINF || GxB_NO_BOOL || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator entrywise: Cx [p] = isinf ((float) Ax [p]).
// NOTE: this file is auto-generated (see header); code kept byte-identical.
// Returns GrB_NO_VALUE when the kernel is compiled out via GB_DISABLE.
GrB_Info GB (_unop_apply__isinf_bool_fp32)
(
bool *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// No bitmap: all anz entries are present; process them all in parallel.
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = (aij) ;
Cx [p] = isinf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
// Entries with Ab [p] == 0 are absent and skipped.
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = (aij) ;
Cx [p] = isinf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = isinf (cast (A')): transpose, typecast, and apply the unary operator.
// The loop body is shared boilerplate pulled in from GB_unop_transpose.c,
// specialized by the GB_* macros defined above in this generated file.
// Returns GrB_NO_VALUE when the kernel is compiled out via GB_DISABLE.
GrB_Info GB (_unop_tran__isinf_bool_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_int32_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int32_uint8
// op(A') function: GB_unop_tran__identity_int32_uint8
// C type: int32_t
// A type: uint8_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = (int32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = (int32_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator entrywise with a typecast:
// Cx [p] = (int32_t) Ax [p].
// NOTE: this file is auto-generated (see header); code kept byte-identical.
// Returns GrB_NO_VALUE when the kernel is compiled out via GB_DISABLE.
GrB_Info GB_unop_apply__identity_int32_uint8
(
int32_t *Cx, // Cx and Ax may be aliased
const uint8_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// No bitmap: every entry is present.
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity with no typecast reduces to a plain parallel memcpy
// (dead here: the macro is 0 for this uint8->int32 instantiation)
GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
int32_t z = (int32_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
// Entries with Ab [p] == 0 are absent and skipped.
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint8_t aij = Ax [p] ;
int32_t z = (int32_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (int32_t) A': transpose, typecast, and apply the identity operator.
// The loop body is shared boilerplate pulled in from GB_unop_transpose.c,
// specialized by the GB_* macros defined above in this generated file.
// Returns GrB_NO_VALUE when the kernel is compiled out via GB_DISABLE.
GrB_Info GB_unop_tran__identity_int32_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
main_visualizer.c |
#include "utils/file_utils.h"
#ifdef COMPILE_OPENGL
#include "draw/draw.h"
#endif
#include "string/sds.h"
#include "single_file_libraries/stb_ds.h"
#include "vtk_utils/vtk_unstructured_grid.h"
#include "vtk_utils/pvd_utils.h"
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
static int current_file = 0;
static inline int get_step_from_filename(char *filename) {
    // Extracts the simulation step encoded in a file name by concatenating
    // every decimal digit it contains (e.g. "V_it_120.vtu" -> 120,
    // "a1b2c3" -> 123). Returns 0 when the name contains no digits.
    int step = 0;
    for(char *p = filename; *p; p++) {
        // Cast to unsigned char: passing a plain (possibly negative) char
        // to isdigit() is undefined behavior for non-ASCII bytes.
        if(isdigit((unsigned char)*p))
            step = step*10 + (*p - '0');
    }
    return step;
}
void init_draw_config(struct draw_config *draw_config, struct visualization_options *options) {
    // Puts the shared draw_config into its initial "paused, nothing loaded"
    // state from the parsed visualization options.
    // (The original assigned grid_info.vtk_grid = NULL twice; the duplicate
    // assignment was removed.)

    // No grid loaded yet.
    draw_config->grid_info.vtk_grid = NULL;
    draw_config->grid_info.file_name = NULL;
    draw_config->grid_info.loaded = false;

    // Value range for rendering, taken from the command-line options.
    draw_config->max_v = options->max_v;
    draw_config->min_v = options->min_v;
    // Nudge a zero minimum off zero — presumably to avoid a degenerate
    // value range; TODO confirm intent with the rendering code.
    if(draw_config->min_v == 0) draw_config->min_v = 0.1f;

    draw_config->dt = options->dt;
    draw_config->draw_type = DRAW_FILE;

    // Playback starts paused; no pending exit/restart/step request.
    draw_config->simulating = true;
    draw_config->paused = true;
    draw_config->exit = false;
    draw_config->restart = false;
    draw_config->advance_or_return = 0;
    draw_config->error_message = NULL;
}
static void read_and_render_activation_map(char *input_file) {
    // Loads an activation-map file into the global draw_config grid while
    // holding draw_config.draw_lock, so the render side never observes a
    // partially updated grid. On failure, publishes an error message and
    // leaves grid_info.file_name NULL.
    draw_config.grid_info.file_name = NULL;

    omp_set_lock(&draw_config.draw_lock);

    draw_config.grid_info.vtk_grid = new_vtk_unstructured_grid_from_activation_file(input_file);

    if(!draw_config.grid_info.vtk_grid) {
        char tmp[4096];
        // snprintf (was sprintf): input_file is caller-supplied and can be
        // longer than tmp, which would overflow the stack buffer.
        snprintf(tmp, sizeof(tmp), "%s is not an activation map", input_file);
        draw_config.error_message = strdup(tmp);
        omp_unset_lock(&draw_config.draw_lock);
        return;
    }

    draw_config.grid_info.file_name = input_file;
    // Use the value range stored in the loaded grid for the color scale.
    draw_config.min_v = draw_config.grid_info.vtk_grid->min_v;
    draw_config.max_v = draw_config.grid_info.vtk_grid->max_v;

    omp_unset_lock(&draw_config.draw_lock);
}
static int read_and_render_files(const char* pvd_file, char *input_dir, char* prefix) {
bool using_pvd = (pvd_file != NULL);
struct vtk_files *vtk_files;
if(!using_pvd) {
vtk_files = (struct vtk_files*) malloc(sizeof(struct vtk_files*));
vtk_files->files_list = list_files_from_dir_sorted(input_dir, prefix);
}
else {
vtk_files = list_files_from_and_timesteps_from_pvd(pvd_file);
}
int num_files = arrlen(vtk_files->files_list);
sds full_path;
if(!using_pvd) {
full_path = sdsnew(input_dir);
}
else {
full_path = sdsnew(get_dir_from_path(pvd_file));
}
if(!num_files) {
char tmp[4096];
sprintf(tmp, "No simulations file found in %s", full_path);
fprintf(stderr, "%s\n", tmp);
draw_config.error_message = strdup(tmp);
return SIMULATION_FINISHED;
}
int step;
int step1;
int step2 = 0;
int final_step;
real_cpu dt = 0;
if(!using_pvd) {
step1 = get_step_from_filename(vtk_files->files_list[0]);
if (num_files > 1) {
step2 = get_step_from_filename(vtk_files->files_list[1]);
step = step2 - step1;
} else {
step = step1;
}
final_step = get_step_from_filename(vtk_files->files_list[num_files - 1]);
dt = draw_config.dt;
draw_config.step = step;
if (dt == 0.0) {
draw_config.final_time = final_step;
} else {
draw_config.final_time = final_step * dt;
}
}
else {
draw_config.final_time = vtk_files->timesteps[num_files-1];
draw_config.dt = -1; //we don't care about dt here as the timesteps are in the PVD file
}
while(true) {
if(!using_pvd) {
if (dt == 0) {
draw_config.time = get_step_from_filename(vtk_files->files_list[current_file]);
} else {
draw_config.time = get_step_from_filename(vtk_files->files_list[current_file]) * dt;
}
}
else {
draw_config.time = vtk_files->timesteps[current_file];
}
sdsfree(full_path);
if(!using_pvd) {
full_path = sdsnew(input_dir);
}
else {
full_path = sdsnew(get_dir_from_path(pvd_file));
}
full_path = sdscat(full_path, "/");
draw_config.grid_info.file_name = NULL;
full_path = sdscat(full_path, vtk_files->files_list[current_file]);
omp_set_lock(&draw_config.draw_lock);
free_vtk_unstructured_grid(draw_config.grid_info.vtk_grid);
draw_config.grid_info.vtk_grid = new_vtk_unstructured_grid_from_vtu_file(full_path);
draw_config.grid_info.loaded = true;
if(!draw_config.grid_info.vtk_grid) {
char tmp[4096];
sprintf(tmp, "Decoder not available for file %s", vtk_files->files_list[current_file]);
fprintf(stderr, "%s\n", tmp);
draw_config.error_message = strdup(tmp);
omp_unset_lock(&draw_config.draw_lock);
arrfree(vtk_files->files_list);
arrfree(vtk_files->timesteps);
free(vtk_files);
sdsfree(full_path);
return SIMULATION_FINISHED;
}
draw_config.grid_info.file_name = full_path;
omp_unset_lock(&draw_config.draw_lock);
omp_set_lock(&draw_config.sleep_lock);
if(draw_config.restart) {
draw_config.time = 0.0;
free_vtk_unstructured_grid(draw_config.grid_info.vtk_grid);
arrfree(vtk_files->files_list);
arrfree(vtk_files->timesteps);
free(vtk_files);
sdsfree(full_path);
return RESTART_SIMULATION;
}
if(draw_config.exit) {
arrfree(vtk_files->files_list);
arrfree(vtk_files->timesteps);
free(vtk_files);
sdsfree(full_path);
return END_SIMULATION;
}
//TODO: maybe change how we handle advance_return
if(draw_config.paused) {
current_file += draw_config.advance_or_return;
draw_config.advance_or_return = 0;
if(current_file < 0) current_file++;
else if(current_file >= num_files) current_file--;
}
else {
current_file++;
if(current_file >= num_files) {
current_file--;
draw_config.paused = true;
}
}
}
}
int main(int argc, char **argv) {
struct visualization_options *options = new_visualization_options();
parse_visualization_options(argc, argv, options);
current_file = options->start_file;
if(!options->activation_map) {
if (!options->input_folder) {
if (!options->pvd_file) {
fprintf(stderr, "Error!. You have to provide a pvd file or a input folder!\n");
}
} else {
if (options->pvd_file) {
fprintf(stderr,
"Warning!. You provided a pvd file (%s) and an input folder (%s). The input folder will be ignored!\n",
options->pvd_file, options->input_folder);
free(options->input_folder);
options->input_folder = NULL;
}
}
}
if(options->save_activation_only) {
struct vtk_unstructured_grid *vtk_grid = new_vtk_unstructured_grid_from_activation_file(options->activation_map);
if(!vtk_grid) {
fprintf(stderr, "Failed to convert %s\n", options->activation_map);
exit(EXIT_FAILURE);
}
sds save_path = sdsnew(options->activation_map);
save_path = sdscat(save_path, ".vtu");
save_vtk_unstructured_grid_as_vtu_compressed(vtk_grid, save_path, 6);
free_vtk_unstructured_grid(vtk_grid);
sdsfree(save_path);
}
else {
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
{
omp_init_lock(&draw_config.draw_lock);
omp_init_lock(&draw_config.sleep_lock);
init_draw_config(&draw_config, options);
init_and_open_visualization_window(DRAW_FILE);
}
#pragma omp section
{
if(options->activation_map) {
read_and_render_activation_map(options->activation_map);
}
else {
int result = read_and_render_files(options->pvd_file, options->input_folder, options->files_prefix);
while (result == RESTART_SIMULATION || result == SIMULATION_FINISHED) {
if (result == RESTART_SIMULATION) {
init_draw_config(&draw_config, options);
current_file = 0;
result = read_and_render_files(options->pvd_file, options->input_folder, options->files_prefix);
}
if (draw_config.restart) result = RESTART_SIMULATION;
if (result == END_SIMULATION || draw_config.exit) {
break;
}
}
}
}
}
}
free_visualization_options(options);
return EXIT_SUCCESS;
} |
GB_unop__identity_int32_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int32_uint16)
// op(A') function: GB (_unop_tran__identity_int32_uint16)
// C type: int32_t
// A type: uint16_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = aij
// type of the A matrix entries
#define GB_ATYPE \
    uint16_t

// type of the C matrix entries
#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

// access entry p of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator (identity: z gets x unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting (uint16_t -> int32_t)
#define GB_CAST(z, aij) \
    int32_t z = (int32_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int32_t z = (int32_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_UINT16)
// Cx = op (cast (Ax)): apply the identity unary operator entrywise,
// casting each uint16_t entry of A to the int32_t entry of C.
GrB_Info GB (_unop_apply__identity_int32_uint16)
(
    int32_t *Cx,                // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // One loop covers both layouts: with no bitmap (Ab == NULL) every entry
    // is present; in the bitmap case entries with a clear Ab flag are
    // skipped (A->b was already memcpy'd into C->b by the caller).
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   // entry p not present in A
        Cx [p] = (int32_t) Ax [p] ;             // cast uint16_t -> int32_t
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, casting each uint16_t entry to int32_t.
GrB_Info GB (_unop_tran__identity_int32_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,    // per-workspace scratch for the transpose
    const int64_t *restrict A_slice,  // how A is partitioned across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The shared transpose kernel is textually included here; it expands the
    // GB_* macros defined above to move and cast each entry.
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
fourier.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF OOO U U RRRR IIIII EEEEE RRRR %
% F O O U U R R I E R R %
% FFF O O U U RRRR I EEE RRRR %
% F O O U U R R I E R R %
% F OOO UUU R R IIIII EEEEE R R %
% %
% %
% MagickCore Discrete Fourier Transform Methods %
% %
% Software Design %
% Sean Burke %
% Fred Weinhaus %
% Cristy %
% July 2009 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/fourier.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#if defined(MAGICKCORE_FFTW_DELEGATE)
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
#include <complex.h>
#endif
#include <fftw3.h>
#if !defined(MAGICKCORE_HAVE_CABS)
#define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1]))
#endif
#if !defined(MAGICKCORE_HAVE_CARG)
#define carg(z) (atan2(cimag(z),creal(z)))
#endif
#if !defined(MAGICKCORE_HAVE_CIMAG)
#define cimag(z) (z[1])
#endif
#if !defined(MAGICKCORE_HAVE_CREAL)
#define creal(z) (z[0])
#endif
#endif
/*
Typedef declarations.
*/
typedef struct _FourierInfo
{
  ChannelType
    channel;        /* which image channel this transform operates on */

  MagickBooleanType
    modulus;        /* true: magnitude/phase output; false: real/imaginary */

  size_t
    width,          /* transform width (callers pad to an even square) */
    height;         /* transform height (set equal to width by callers) */

  ssize_t
    center;         /* width/2+1: row length of the FFTW r2c half-spectrum */
} FourierInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p l e x I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ComplexImages() performs complex mathematics on an image sequence.
%
% The format of the ComplexImages method is:
%
% MagickBooleanType ComplexImages(Image *images,const ComplexOperator op,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o op: A complex operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ComplexImages() applies the complex operator 'op' pixel-wise to an image
  sequence: A = (images[0], images[1]) as (real, imaginary), and
  B = (images[2], images[3]) when four images are given, otherwise B = A.
  Returns a new two-image list (Cr, Ci) or NULL on error.
*/
MagickExport Image *ComplexImages(const Image *images,const ComplexOperator op,
  ExceptionInfo *exception)
{
#define ComplexImageTag  "Complex/Image"

  CacheView
    *Ai_view,
    *Ar_view,
    *Bi_view,
    *Br_view,
    *Ci_view,
    *Cr_view;

  const char
    *artifact;

  const Image
    *Ai_image,
    *Ar_image,
    *Bi_image,
    *Br_image;

  double
    snr;

  Image
    *Ci_image,
    *complex_images,
    *Cr_image,
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    At least two images (real + imaginary) are required.
  */
  if (images->next == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Clone the two output images (Cr, Ci) from the inputs.
  */
  image=CloneImage(images,0,0,MagickTrue,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      image=DestroyImageList(image);
      return(image);
    }
  image->depth=32UL;
  complex_images=NewImageList();
  AppendImageToList(&complex_images,image);
  image=CloneImage(images->next,0,0,MagickTrue,exception);
  if (image == (Image *) NULL)
    {
      complex_images=DestroyImageList(complex_images);
      return(complex_images);
    }
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      image=DestroyImageList(image);
      return(image);
    }
  image->depth=32UL;
  AppendImageToList(&complex_images,image);
  /*
    Apply complex mathematics to image pixels.
  */
  artifact=GetImageArtifact(image,"complex:snr");
  snr=0.0;
  if (artifact != (const char *) NULL)
    snr=StringToDouble(artifact,(char **) NULL);
  Ar_image=images;
  Ai_image=images->next;
  Br_image=images;
  Bi_image=images->next;
  /* with a 4-image sequence, images 3 and 4 form the B operand */
  if ((images->next->next != (Image *) NULL) &&
      (images->next->next->next != (Image *) NULL))
    {
      Br_image=images->next->next;
      Bi_image=images->next->next->next;
    }
  Cr_image=complex_images;
  Ci_image=complex_images->next;
  Ar_view=AcquireVirtualCacheView(Ar_image,exception);
  Ai_view=AcquireVirtualCacheView(Ai_image,exception);
  Br_view=AcquireVirtualCacheView(Br_image,exception);
  Bi_view=AcquireVirtualCacheView(Bi_image,exception);
  Cr_view=AcquireAuthenticCacheView(Cr_image,exception);
  Ci_view=AcquireAuthenticCacheView(Ci_image,exception);
  status=MagickTrue;
  progress=0;
  columns=MagickMin(Cr_image->columns,Ci_image->columns);
  rows=MagickMin(Cr_image->rows,Ci_image->rows);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(Cr_image,complex_images,rows,1L)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    const PixelPacket
      *magick_restrict Ai,
      *magick_restrict Ar,
      *magick_restrict Bi,
      *magick_restrict Br;

    PixelPacket
      *magick_restrict Ci,
      *magick_restrict Cr;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    Ar=GetCacheViewVirtualPixels(Ar_view,0,y,columns,1,exception);
    Ai=GetCacheViewVirtualPixels(Ai_view,0,y,columns,1,exception);
    Br=GetCacheViewVirtualPixels(Br_view,0,y,columns,1,exception);
    Bi=GetCacheViewVirtualPixels(Bi_view,0,y,columns,1,exception);
    Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,columns,1,exception);
    Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,columns,1,exception);
    if ((Ar == (const PixelPacket *) NULL) ||
        (Ai == (const PixelPacket *) NULL) ||
        (Br == (const PixelPacket *) NULL) ||
        (Bi == (const PixelPacket *) NULL) ||
        (Cr == (PixelPacket *) NULL) || (Ci == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      switch (op)
      {
        case AddComplexOperator:
        {
          Cr->red=Ar->red+Br->red;
          Ci->red=Ai->red+Bi->red;
          Cr->green=Ar->green+Br->green;
          Ci->green=Ai->green+Bi->green;
          Cr->blue=Ar->blue+Br->blue;
          Ci->blue=Ai->blue+Bi->blue;
          if (images->matte != MagickFalse)
            {
              Cr->opacity=Ar->opacity+Br->opacity;
              Ci->opacity=Ai->opacity+Bi->opacity;
            }
          break;
        }
        case ConjugateComplexOperator:
        default:
        {
          Cr->red=Ar->red;
          Ci->red=(-Bi->red);
          Cr->green=Ar->green;
          Ci->green=(-Bi->green);
          Cr->blue=Ar->blue;
          Ci->blue=(-Bi->blue);
          if (images->matte != MagickFalse)
            {
              Cr->opacity=Ar->opacity;
              Ci->opacity=(-Bi->opacity);
            }
          break;
        }
        case DivideComplexOperator:
        {
          double
            gamma;

          /* gamma = 1/|B|^2 (snr regularizes near-zero denominators) */
          gamma=QuantumRange*PerceptibleReciprocal(QuantumScale*Br->red*Br->red+
            QuantumScale*Bi->red*Bi->red+snr);
          Cr->red=gamma*(QuantumScale*Ar->red*Br->red+QuantumScale*Ai->red*
            Bi->red);
          Ci->red=gamma*(QuantumScale*Ai->red*Br->red-QuantumScale*Ar->red*
            Bi->red);
          gamma=QuantumRange*PerceptibleReciprocal(QuantumScale*Br->green*
            Br->green+QuantumScale*Bi->green*Bi->green+snr);
          Cr->green=gamma*(QuantumScale*Ar->green*Br->green+QuantumScale*
            Ai->green*Bi->green);
          Ci->green=gamma*(QuantumScale*Ai->green*Br->green-QuantumScale*
            Ar->green*Bi->green);
          gamma=QuantumRange*PerceptibleReciprocal(QuantumScale*Br->blue*
            Br->blue+QuantumScale*Bi->blue*Bi->blue+snr);
          Cr->blue=gamma*(QuantumScale*Ar->blue*Br->blue+QuantumScale*
            Ai->blue*Bi->blue);
          Ci->blue=gamma*(QuantumScale*Ai->blue*Br->blue-QuantumScale*
            Ar->blue*Bi->blue);
          if (images->matte != MagickFalse)
            {
              gamma=QuantumRange*PerceptibleReciprocal(QuantumScale*Br->opacity*
                Br->opacity+QuantumScale*Bi->opacity*Bi->opacity+snr);
              Cr->opacity=gamma*(QuantumScale*Ar->opacity*Br->opacity+
                QuantumScale*Ai->opacity*Bi->opacity);
              Ci->opacity=gamma*(QuantumScale*Ai->opacity*Br->opacity-
                QuantumScale*Ar->opacity*Bi->opacity);
            }
          break;
        }
        case MagnitudePhaseComplexOperator:
        {
          /* phase is mapped from [-pi,pi] into [0,1] */
          Cr->red=sqrt(QuantumScale*Ar->red*Ar->red+QuantumScale*
            Ai->red*Ai->red);
          Ci->red=atan2((double) Ai->red,(double) Ar->red)/(2.0*MagickPI)+0.5;
          Cr->green=sqrt(QuantumScale*Ar->green*Ar->green+QuantumScale*
            Ai->green*Ai->green);
          Ci->green=atan2((double) Ai->green,(double) Ar->green)/
            (2.0*MagickPI)+0.5;
          Cr->blue=sqrt(QuantumScale*Ar->blue*Ar->blue+QuantumScale*
            Ai->blue*Ai->blue);
          Ci->blue=atan2(Ai->blue,Ar->blue)/(2.0*MagickPI)+0.5;
          if (images->matte != MagickFalse)
            {
              Cr->opacity=sqrt(QuantumScale*Ar->opacity*Ar->opacity+
                QuantumScale*Ai->opacity*Ai->opacity);
              Ci->opacity=atan2((double) Ai->opacity,(double) Ar->opacity)/
                (2.0*MagickPI)+0.5;
            }
          break;
        }
        case MultiplyComplexOperator:
        {
          /* BUGFIX: the imaginary-product terms were missing the
             QuantumScale normalization that every other product term (and
             the opacity branch below) applies, leaving those terms in
             Quantum^2 units. */
          Cr->red=(QuantumScale*Ar->red*Br->red-QuantumScale*
            Ai->red*Bi->red);
          Ci->red=(QuantumScale*Ai->red*Br->red+QuantumScale*
            Ar->red*Bi->red);
          Cr->green=(QuantumScale*Ar->green*Br->green-QuantumScale*
            Ai->green*Bi->green);
          Ci->green=(QuantumScale*Ai->green*Br->green+QuantumScale*
            Ar->green*Bi->green);
          Cr->blue=(QuantumScale*Ar->blue*Br->blue-QuantumScale*
            Ai->blue*Bi->blue);
          Ci->blue=(QuantumScale*Ai->blue*Br->blue+QuantumScale*
            Ar->blue*Bi->blue);
          if (images->matte != MagickFalse)
            {
              Cr->opacity=(QuantumScale*Ar->opacity*Br->opacity-
                QuantumScale*Ai->opacity*Bi->opacity);
              Ci->opacity=(QuantumScale*Ai->opacity*Br->opacity+
                QuantumScale*Ar->opacity*Bi->opacity);
            }
          break;
        }
        case RealImaginaryComplexOperator:
        {
          /* inverse of MagnitudePhase: A is (magnitude, phase in [0,1]) */
          Cr->red=Ar->red*cos(2.0*MagickPI*(Ai->red-0.5));
          Ci->red=Ar->red*sin(2.0*MagickPI*(Ai->red-0.5));
          Cr->green=Ar->green*cos(2.0*MagickPI*(Ai->green-0.5));
          Ci->green=Ar->green*sin(2.0*MagickPI*(Ai->green-0.5));
          Cr->blue=Ar->blue*cos(2.0*MagickPI*(Ai->blue-0.5));
          Ci->blue=Ar->blue*sin(2.0*MagickPI*(Ai->blue-0.5));
          if (images->matte != MagickFalse)
            {
              Cr->opacity=Ar->opacity*cos(2.0*MagickPI*(Ai->opacity-0.5));
              Ci->opacity=Ar->opacity*sin(2.0*MagickPI*(Ai->opacity-0.5));
            }
          break;
        }
        case SubtractComplexOperator:
        {
          Cr->red=Ar->red-Br->red;
          Ci->red=Ai->red-Bi->red;
          Cr->green=Ar->green-Br->green;
          Ci->green=Ai->green-Bi->green;
          Cr->blue=Ar->blue-Br->blue;
          Ci->blue=Ai->blue-Bi->blue;
          if (Cr_image->matte != MagickFalse)
            {
              Cr->opacity=Ar->opacity-Br->opacity;
              Ci->opacity=Ai->opacity-Bi->opacity;
            }
          break;
        }
      }
      Ar++;
      Ai++;
      Br++;
      Bi++;
      Cr++;
      Ci++;
    }
    if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse)
      status=MagickFalse;
    if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(images,ComplexImageTag,progress,images->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  Cr_view=DestroyCacheView(Cr_view);
  Ci_view=DestroyCacheView(Ci_view);
  Br_view=DestroyCacheView(Br_view);
  Bi_view=DestroyCacheView(Bi_view);
  Ar_view=DestroyCacheView(Ar_view);
  Ai_view=DestroyCacheView(Ai_view);
  if (status == MagickFalse)
    complex_images=DestroyImageList(complex_images);
  return(complex_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r w a r d F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ForwardFourierTransformImage() implements the discrete Fourier transform
% (DFT) of the image either as a magnitude / phase or real / imaginary image
% pair.
%
% The format of the ForwadFourierTransformImage method is:
%
% Image *ForwardFourierTransformImage(const Image *image,
% const MagickBooleanType modulus,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulus: if true, return as transform as a magnitude / phase pair
% otherwise a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
/*
  RollFourier() circularly shifts the width x height buffer 'roll_pixels'
  in place by (x_offset,y_offset), wrapping at the edges. Callers use it to
  move the zero-frequency (DC) term to the image center. Returns MagickFalse
  only when scratch memory cannot be acquired.
*/
static MagickBooleanType RollFourier(const size_t width,const size_t height,
  const ssize_t x_offset,const ssize_t y_offset,double *roll_pixels)
{
  double
    *source_pixels;

  MemoryInfo
    *source_info;

  ssize_t
    i,
    x;

  ssize_t
    u,
    v,
    y;

  /*
    Move zero frequency (DC, average color) from (0,0) to (width/2,height/2).
  */
  source_info=AcquireVirtualMemory(width,height*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    return(MagickFalse);
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  i=0L;
  for (y=0L; y < (ssize_t) height; y++)
  {
    /* destination row v = (y+y_offset) wrapped into [0,height) */
    if (y_offset < 0L)
      v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset;
    else
      v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height :
        y+y_offset;
    for (x=0L; x < (ssize_t) width; x++)
    {
      /* destination column u = (x+x_offset) wrapped into [0,width) */
      if (x_offset < 0L)
        u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset;
      else
        u=((x+x_offset) > ((ssize_t) width-1L)) ? x+x_offset-(ssize_t) width :
          x+x_offset;
      source_pixels[v*width+u]=roll_pixels[i++];
    }
  }
  /* copy the shifted scratch buffer back over the input */
  (void) memcpy(roll_pixels,source_pixels,height*width*
    sizeof(*source_pixels));
  source_info=RelinquishVirtualMemory(source_info);
  return(MagickTrue);
}
/*
  ForwardQuadrantSwap() expands the FFTW r2c half-spectrum stored in
  'source_pixels' (width/2+1 columns per row) into a full, center-origin
  spectrum in 'forward_pixels', reconstructing the redundant half by
  conjugate symmetry. Returns MagickFalse only if RollFourier() fails.
*/
static MagickBooleanType ForwardQuadrantSwap(const size_t width,
  const size_t height,double *source_pixels,double *forward_pixels)
{
  MagickBooleanType
    status;

  ssize_t
    x;

  ssize_t
    center,
    y;

  /*
    Swap quadrants.
  */
  center=(ssize_t) (width/2L)+1L;
  /* center the rows of the stored half-spectrum first */
  status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L,
    source_pixels);
  if (status == MagickFalse)
    return(MagickFalse);
  /* copy the stored half into the right-hand side of the output */
  for (y=0L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      forward_pixels[y*width+x+width/2L]=source_pixels[y*center+x];
  /* mirror the remaining rows into the left-hand side */
  for (y=1; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      forward_pixels[(height-y)*width+width/2L-x-1L]=
        source_pixels[y*center+x+1L];
  /* row 0 has no mirror row; mirror within the row itself */
  for (x=0L; x < (ssize_t) (width/2L); x++)
    forward_pixels[width/2L-x-1L]=source_pixels[x+1L];
  return(MagickTrue);
}
/*
  CorrectPhaseLHS() negates the left half (columns 0 .. width/2-1) of every
  row of the width x height buffer 'fourier_pixels' in place.
*/
static void CorrectPhaseLHS(const size_t width,const size_t height,
  double *fourier_pixels)
{
  const size_t
    half_width = width/2;

  for (size_t row=0; row < height; row++)
  {
    double
      *scanline = fourier_pixels+row*width;

    for (size_t col=0; col < half_width; col++)
      scanline[col]=(-scanline[col]);
  }
}
/*
  ForwardFourier() writes the computed 'magnitude' and 'phase' arrays into
  the two-image list held by 'image' (first image: magnitude/real, second:
  phase/imaginary), quadrant-swapping to a center-origin layout and, in
  modulus mode, remapping phase from [-pi,pi] to [0,1].
*/
static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info,
  Image *image,double *magnitude,double *phase,ExceptionInfo *exception)
{
  CacheView
    *magnitude_view,
    *phase_view;

  double
    *magnitude_pixels,
    *phase_pixels;

  Image
    *magnitude_image,
    *phase_image;

  MagickBooleanType
    status;

  MemoryInfo
    *magnitude_info,
    *phase_info;

  IndexPacket
    *indexes;

  PixelPacket
    *q;

  ssize_t
    x;

  ssize_t
    i,
    y;

  magnitude_image=GetFirstImageInList(image);
  phase_image=GetNextImageInList(image);
  if (phase_image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create "Fourier Transform" image from constituent arrays.
  */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  (void) memset(magnitude_pixels,0,fourier_info->width*
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  (void) memset(phase_pixels,0,fourier_info->width*
    fourier_info->height*sizeof(*phase_pixels));
  /* expand both half-spectra into full, center-origin buffers */
  status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,
    magnitude,magnitude_pixels);
  if (status != MagickFalse)
    status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,phase,
      phase_pixels);
  CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
  if (fourier_info->modulus != MagickFalse)
    {
      /* remap phase from [-pi,pi] into the displayable range [0,1] */
      i=0L;
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->width; x++)
        {
          phase_pixels[i]/=(2.0*MagickPI);
          phase_pixels[i]+=0.5;
          i++;
        }
    }
  /* write the magnitude buffer into the selected channel */
  magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception);
  i=0L;
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->width,1UL,
      exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          SetPixelRed(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case GreenChannel:
        {
          SetPixelGreen(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case BlueChannel:
        {
          SetPixelBlue(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case OpacityChannel:
        {
          SetPixelOpacity(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case IndexChannel:
        {
          SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]));
          break;
        }
        case GrayChannels:
        {
          SetPixelGray(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
      }
      i++;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(magnitude_view,exception);
    if (status == MagickFalse)
      break;
  }
  magnitude_view=DestroyCacheView(magnitude_view);
  /* write the phase buffer into the selected channel */
  i=0L;
  phase_view=AcquireAuthenticCacheView(phase_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->width,1UL,
      exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(phase_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          SetPixelRed(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case GreenChannel:
        {
          SetPixelGreen(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case BlueChannel:
        {
          SetPixelBlue(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case OpacityChannel:
        {
          SetPixelOpacity(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case IndexChannel:
        {
          SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case GrayChannels:
        {
          SetPixelGray(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
      }
      i++;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(phase_view,exception);
    if (status == MagickFalse)
      break;
  }
  phase_view=DestroyCacheView(phase_view);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}
/*
  ForwardFourierTransform() copies one channel of 'image' into a real buffer,
  runs the FFTW real-to-complex 2-D transform, optionally normalizes by
  1/(width*height), and emits either magnitude/phase or real/imaginary pairs
  into the caller-provided half-spectrum arrays.
*/
static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info,
  const Image *image,double *magnitude_pixels,double *phase_pixels,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *value;

  double
    *source_pixels;

  fftw_complex
    *forward_pixels;

  fftw_plan
    fftw_r2c_plan;

  MemoryInfo
    *forward_info,
    *source_info;

  const IndexPacket
    *indexes;

  const PixelPacket
    *p;

  ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Generate the forward Fourier transform.
  */
  source_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  /* zero-fill so padding beyond the image extent contributes nothing */
  memset(source_pixels,0,fourier_info->width*fourier_info->height*
    sizeof(*source_pixels));
  i=0L;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      /* normalize the selected channel to [0,1] */
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          source_pixels[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          source_pixels[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  /* r2c output needs only width*(height/2+1) complex values */
  forward_info=AcquireVirtualMemory((size_t) fourier_info->width,
    (fourier_info->height/2+1)*sizeof(*forward_pixels));
  if (forward_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
      return(MagickFalse);
    }
  forward_pixels=(fftw_complex *) GetVirtualMemoryBlob(forward_info);
  /* the FFTW planner is not thread safe; serialize plan creation */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ForwardFourierTransform)
#endif
  fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height,
    source_pixels,forward_pixels,FFTW_ESTIMATE);
  fftw_execute_dft_r2c(fftw_r2c_plan,source_pixels,forward_pixels);
  fftw_destroy_plan(fftw_r2c_plan);
  source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
  value=GetImageArtifact(image,"fourier:normalize");
  if ((value == (const char *) NULL) || (LocaleCompare(value,"forward") == 0))
    {
      double
        gamma;

      /*
        Normalize forward transform.
      */
      i=0L;
      gamma=PerceptibleReciprocal((double) fourier_info->width*
        fourier_info->height);
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->center; x++)
        {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
          forward_pixels[i]*=gamma;
#else
          forward_pixels[i][0]*=gamma;
          forward_pixels[i][1]*=gamma;
#endif
          i++;
        }
    }
  /*
    Generate magnitude and phase (or real and imaginary).
  */
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=cabs(forward_pixels[i]);
        phase_pixels[i]=carg(forward_pixels[i]);
        i++;
      }
  else
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=creal(forward_pixels[i]);
        phase_pixels[i]=cimag(forward_pixels[i]);
        i++;
      }
  forward_info=(MemoryInfo *) RelinquishVirtualMemory(forward_info);
  return(MagickTrue);
}
/*
  ForwardFourierTransformChannel() computes the forward DFT of one channel of
  'image' and stores the result in the magnitude/phase (or real/imaginary)
  image pair held by 'fourier_image'. Returns MagickFalse on failure.
*/
static MagickBooleanType ForwardFourierTransformChannel(const Image *image,
  const ChannelType channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  double
    *magnitude_pixels,
    *phase_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *magnitude_info,
    *phase_info;

  fourier_info.width=image->columns;
  fourier_info.height=image->rows;
  if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
      ((image->rows % 2) != 0))
    {
      /* pad to an even-sized square so the r2c layout is uniform */
      size_t extent=image->columns < image->rows ? image->rows : image->columns;
      fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
    }
  fourier_info.height=fourier_info.width;
  fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      /* BUGFIX: the original tested '== NULL' here, relinquishing a null
         pointer and leaking the successful allocation when only one of the
         two acquires failed; '!=' matches the identical cleanup pattern in
         ForwardFourier(). */
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  status=ForwardFourierTransform(&fourier_info,image,magnitude_pixels,
    phase_pixels,exception);
  if (status != MagickFalse)
    status=ForwardFourier(&fourier_info,fourier_image,magnitude_pixels,
      phase_pixels,exception);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}
#endif
/*
  ForwardFourierTransformImage() returns an image list containing the
  magnitude and phase (or real and imaginary, when modulus is MagickFalse)
  images of the forward DFT of each channel.  Requires the FFTW delegate;
  without it a missing-delegate warning is raised and an empty list is
  returned.
*/
MagickExport Image *ForwardFourierTransformImage(const Image *image,
  const MagickBooleanType modulus,ExceptionInfo *exception)
{
  Image
    *fourier_image;

  fourier_image=NewImageList();
#if !defined(MAGICKCORE_FFTW_DELEGATE)
  (void) modulus;
  (void) ThrowMagickException(exception,GetMagickModule(),
    MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
    image->filename);
#else
  {
    Image
      *magnitude_image;

    size_t
      height,
      width;

    /*
      The transform is computed over a square, even-sized extent: take the
      larger dimension, rounded up to an even value, for both axes.
    */
    width=image->columns;
    height=image->rows;
    if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
        ((image->rows % 2) != 0))
      {
        size_t extent=image->columns < image->rows ? image->rows :
          image->columns;
        width=(extent & 0x01) == 1 ? extent+1UL : extent;
      }
    height=width;
    magnitude_image=CloneImage(image,width,height,MagickTrue,exception);
    if (magnitude_image != (Image *) NULL)
      {
        Image
          *phase_image;

        magnitude_image->storage_class=DirectClass;
        magnitude_image->depth=32UL;
        phase_image=CloneImage(image,width,height,MagickTrue,exception);
        if (phase_image == (Image *) NULL)
          magnitude_image=DestroyImage(magnitude_image);
        else
          {
            MagickBooleanType
              is_gray,
              status;

            phase_image->storage_class=DirectClass;
            phase_image->depth=32UL;
            /*
              The pair is appended up front; on any channel failure the
              whole list is destroyed below.
            */
            AppendImageToList(&fourier_image,magnitude_image);
            AppendImageToList(&fourier_image,phase_image);
            status=MagickTrue;
            is_gray=IsGrayImage(image,exception);
            /*
              Transform each channel in its own OpenMP section: gray images
              need a single transform, otherwise red/green/blue plus the
              opacity channel (if matte) and the index channel (if CMYK).
            */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel sections
#endif
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                if (is_gray != MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    GrayChannels,modulus,fourier_image,exception);
                else
                  thread_status=ForwardFourierTransformChannel(image,RedChannel,
                    modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (is_gray == MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    GreenChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (is_gray == MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    BlueChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (image->matte != MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    OpacityChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (image->colorspace == CMYKColorspace)
                  thread_status=ForwardFourierTransformChannel(image,
                    IndexChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
            }
            if (status == MagickFalse)
              fourier_image=DestroyImageList(fourier_image);
            fftw_cleanup();
          }
      }
  }
#endif
  return(fourier_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n v e r s e F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InverseFourierTransformImage() implements the inverse discrete Fourier
% transform (DFT) of the image either as a magnitude / phase or real /
% imaginary image pair.
%
% The format of the InverseFourierTransformImage method is:
%
% Image *InverseFourierTransformImage(const Image *magnitude_image,
% const Image *phase_image,const MagickBooleanType modulus,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o magnitude_image: the magnitude or real image.
%
% o phase_image: the phase or imaginary image.
%
% o modulus: if true, return transform as a magnitude / phase pair
% otherwise a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
/*
  InverseQuadrantSwap() repacks a full-plane (width x height) array into the
  half-plane (center x height, center = width/2+1) layout used by FFTW's
  complex-to-real transform, undoing the quadrant swap applied by the
  forward transform.  Returns the status of the final RollFourier() shift.
*/
static MagickBooleanType InverseQuadrantSwap(const size_t width,
  const size_t height,const double *source,double *destination)
{
  ssize_t
    x;

  ssize_t
    center,
    y;

  /*
    Swap quadrants.
  */
  center=(ssize_t) (width/2L)+1L;
  /* Mirror the interior rows of the left half-plane into the destination. */
  for (y=1L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L+1L); x++)
      destination[(height-y)*center-x+width/2L]=source[y*width+x];
  /* Copy the Nyquist column into column 0 of each destination row. */
  for (y=0L; y < (ssize_t) height; y++)
    destination[y*center]=source[y*width+width/2L];
  /* First row: reversed copy of the leading half-row. */
  for (x=0L; x < center; x++)
    destination[x]=source[center-x-1L];
  /* Finally roll rows by -height/2 to restore the DC position. */
  return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination));
}
/*
  InverseFourier() reads the magnitude/phase (or real/imaginary) image pair
  into double arrays, undoes the forward transform's quadrant swap and (for
  modulus data) phase normalization, then merges the two components into the
  complex array fourier_pixels ready for the FFTW c2r inverse transform.

  Returns MagickFalse only on memory-allocation failure; pixel-cache read
  failures terminate the read loops early but are not reported here.
*/
static MagickBooleanType InverseFourier(FourierInfo *fourier_info,
  const Image *magnitude_image,const Image *phase_image,
  fftw_complex *fourier_pixels,ExceptionInfo *exception)
{
  CacheView
    *magnitude_view,
    *phase_view;

  double
    *inverse_pixels,
    *magnitude_pixels,
    *phase_pixels;

  MagickBooleanType
    status;

  MemoryInfo
    *inverse_info,
    *magnitude_info,
    *phase_info;

  const IndexPacket
    *indexes;

  const PixelPacket
    *p;

  ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Inverse Fourier - read image and break down into a double array.
  */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*phase_pixels));
  /*
    inverse_info is a scratch half-plane (width x (height/2+1)) buffer,
    reused for both components by InverseQuadrantSwap().
  */
  inverse_info=AcquireVirtualMemory((size_t) fourier_info->width,
    (fourier_info->height/2+1)*sizeof(*inverse_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL) ||
      (inverse_info == (MemoryInfo *) NULL))
    {
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (inverse_info != (MemoryInfo *) NULL)
        inverse_info=RelinquishVirtualMemory(inverse_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  inverse_pixels=(double *) GetVirtualMemoryBlob(inverse_info);
  /*
    Read the magnitude (or real) component, scaled to [0,1].
  */
  i=0L;
  magnitude_view=AcquireVirtualCacheView(magnitude_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  magnitude_view=DestroyCacheView(magnitude_view);
  /*
    Undo the forward transform's quadrant swap, compacting into the FFTW
    half-plane layout (height x center doubles).
  */
  status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
    magnitude_pixels,inverse_pixels);
  (void) memcpy(magnitude_pixels,inverse_pixels,fourier_info->height*
    fourier_info->center*sizeof(*magnitude_pixels));
  /*
    Read the phase (or imaginary) component the same way.
  */
  i=0L;
  phase_view=AcquireVirtualCacheView(phase_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(phase_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          phase_pixels[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          phase_pixels[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  if (fourier_info->modulus != MagickFalse)
    {
      /*
        Map normalized phase [0,1] back to [-pi,pi].
      */
      i=0L;
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->width; x++)
        {
          phase_pixels[i]-=0.5;
          phase_pixels[i]*=(2.0*MagickPI);
          i++;
        }
    }
  phase_view=DestroyCacheView(phase_view);
  CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
  if (status != MagickFalse)
    status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
      phase_pixels,inverse_pixels);
  (void) memcpy(phase_pixels,inverse_pixels,fourier_info->height*
    fourier_info->center*sizeof(*phase_pixels));
  inverse_info=RelinquishVirtualMemory(inverse_info);
  /*
    Merge two sets.
  */
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    /* Polar form: complex = magnitude * e^(i*phase). */
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
       for (x=0L; x < (ssize_t) fourier_info->center; x++)
       {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
         fourier_pixels[i]=magnitude_pixels[i]*cos(phase_pixels[i])+I*
           magnitude_pixels[i]*sin(phase_pixels[i]);
#else
         fourier_pixels[i][0]=magnitude_pixels[i]*cos(phase_pixels[i]);
         fourier_pixels[i][1]=magnitude_pixels[i]*sin(phase_pixels[i]);
#endif
         i++;
      }
  else
    /* Cartesian form: real + i*imaginary. */
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
        fourier_pixels[i]=magnitude_pixels[i]+I*phase_pixels[i];
#else
        fourier_pixels[i][0]=magnitude_pixels[i];
        fourier_pixels[i][1]=phase_pixels[i];
#endif
        i++;
      }
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  phase_info=RelinquishVirtualMemory(phase_info);
  return(status);
}
/*
  InverseFourierTransform() executes the FFTW complex-to-real inverse DFT on
  fourier_pixels and stores the (optionally normalized) spatial-domain
  result into the selected channel of the output image.

  NOTE(review): out-of-place fftw c2r transforms may overwrite their complex
  input; callers must not reuse fourier_pixels afterwards -- confirm against
  the FFTW documentation.
*/
static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info,
  fftw_complex *fourier_pixels,Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    *source_pixels;

  const char
    *value;

  fftw_plan
    fftw_c2r_plan;

  MemoryInfo
    *source_info;

  IndexPacket
    *indexes;

  PixelPacket
    *q;

  ssize_t
    i,
    x;

  ssize_t
    y;

  source_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  /*
    value may be NULL when the artifact is absent; LocaleCompare() is
    presumed NULL-safe here (MagickCore convention) -- verify.
  */
  value=GetImageArtifact(image,"fourier:normalize");
  if (LocaleCompare(value,"inverse") == 0)
    {
      double
        gamma;

      /*
        Normalize inverse transform.
      */
      i=0L;
      gamma=PerceptibleReciprocal((double) fourier_info->width*
        fourier_info->height);
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->center; x++)
        {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
          fourier_pixels[i]*=gamma;
#else
          fourier_pixels[i][0]*=gamma;
          fourier_pixels[i][1]*=gamma;
#endif
          i++;
        }
    }
  /*
    Plan creation is serialized: the FFTW planner is not thread-safe.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_InverseFourierTransform)
#endif
  fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height,
    fourier_pixels,source_pixels,FFTW_ESTIMATE);
  fftw_execute_dft_c2r(fftw_c2r_plan,fourier_pixels,source_pixels);
  fftw_destroy_plan(fftw_c2r_plan);
  /*
    Write the real-valued result into the image; the transform extent may
    exceed the image, so rows and columns are clamped to the image bounds.
  */
  i=0L;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    if (y >= (ssize_t) image->rows)
      break;
    q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width >
      image->columns ? image->columns : fourier_info->width,1UL,exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      /* Beyond the clamped row width, q is advanced but never dereferenced. */
      if (x < (ssize_t) image->columns)
        switch (fourier_info->channel)
        {
          case RedChannel:
          default:
          {
            SetPixelRed(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case GreenChannel:
          {
            SetPixelGreen(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case BlueChannel:
          {
            SetPixelBlue(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case OpacityChannel:
          {
            SetPixelOpacity(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case IndexChannel:
          {
            SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
              source_pixels[i]));
            break;
          }
          case GrayChannels:
          {
            SetPixelGray(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
        }
      i++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  source_info=RelinquishVirtualMemory(source_info);
  return(MagickTrue);
}
/*
  InverseFourierTransformChannel() reconstructs one channel of the spatial
  image from the magnitude/phase image pair: the pair is merged into a
  complex half-plane buffer and then run through the FFTW inverse
  transform.  Returns MagickTrue on success.
*/
static MagickBooleanType InverseFourierTransformChannel(
  const Image *magnitude_image,const Image *phase_image,
  const ChannelType channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  fftw_complex
    *inverse_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *inverse_info;

  /*
    Determine the square, even-sized transform extent.
  */
  fourier_info.width=magnitude_image->columns;
  if ((magnitude_image->columns != magnitude_image->rows) ||
      ((magnitude_image->columns % 2) != 0) ||
      ((magnitude_image->rows % 2) != 0))
    {
      size_t
        extent;

      extent=magnitude_image->columns;
      if (magnitude_image->columns < magnitude_image->rows)
        extent=magnitude_image->rows;
      if ((extent & 0x01) == 1)
        extent++;
      fourier_info.width=extent;
    }
  fourier_info.height=fourier_info.width;
  fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  /*
    Half-plane complex buffer for the FFTW c2r input.
  */
  inverse_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*inverse_pixels));
  if (inverse_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      return(MagickFalse);
    }
  inverse_pixels=(fftw_complex *) GetVirtualMemoryBlob(inverse_info);
  status=InverseFourier(&fourier_info,magnitude_image,phase_image,
    inverse_pixels,exception);
  if (status != MagickFalse)
    status=InverseFourierTransform(&fourier_info,inverse_pixels,fourier_image,
      exception);
  inverse_info=RelinquishVirtualMemory(inverse_info);
  return(status);
}
#endif
MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image,
const Image *phase_image,const MagickBooleanType modulus,
ExceptionInfo *exception)
{
Image
*fourier_image;
assert(magnitude_image != (Image *) NULL);
assert(magnitude_image->signature == MagickCoreSignature);
if (magnitude_image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
magnitude_image->filename);
if (phase_image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ImageSequenceRequired","`%s'",magnitude_image->filename);
return((Image *) NULL);
}
#if !defined(MAGICKCORE_FFTW_DELEGATE)
fourier_image=(Image *) NULL;
(void) modulus;
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
magnitude_image->filename);
#else
{
fourier_image=CloneImage(magnitude_image,magnitude_image->columns,
magnitude_image->rows,MagickTrue,exception);
if (fourier_image != (Image *) NULL)
{
MagickBooleanType
is_gray,
status;
status=MagickTrue;
is_gray=IsGrayImage(magnitude_image,exception);
if (is_gray != MagickFalse)
is_gray=IsGrayImage(phase_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel sections
#endif
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
if (is_gray != MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,GrayChannels,modulus,fourier_image,exception);
else
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,RedChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,GreenChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,BlueChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (magnitude_image->matte != MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,OpacityChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (magnitude_image->colorspace == CMYKColorspace)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,IndexChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
}
if (status == MagickFalse)
fourier_image=DestroyImage(fourier_image);
}
fftw_cleanup();
}
#endif
return(fourier_image);
}
|
torch_kdtree_radius.h |
#ifndef TORCH_KDTREE_RADIUS_H_
#define TORCH_KDTREE_RADIUS_H_
// search for a single point
// Appends to `out_` the index of every data point whose squared distance to
// `point` is strictly less than `radius2`; `out_` is left sorted ascending.
// Traversal: descend to the leaf nearest the query, then back-trace toward
// the sub-tree root, enqueueing sibling branches whose splitting plane lies
// within the radius (BFS over `buffer`).
template<int dim>
void TorchKDTree::_search_radius(const float* point, float radius2, std::vector<sint>& out_)
{
    // (sub-tree root, leaf reached by descending from that root)
    using start_end = std::tuple<refIdx_t, refIdx_t>;
    float dist = std::numeric_limits<float>::max();
    float dist_plane = std::numeric_limits<float>::max();
    refIdx_t node_bro;

    // BFS
    std::queue<start_end> buffer;
    refIdx_t node_start, node_end;
    buffer.emplace(start_end(root, _search(point, root)));
    while (!buffer.empty())
    {
        std::tie(node_start, node_end) = buffer.front();
        buffer.pop();

        // back trace until to the starting node
        while (true)
        {
            // update if current node is better
            dist = distance<dim>(point, coordinates + numDimensions * kdNodes[node_end].tuple);
            if (dist < radius2) // exists a smaller value
            {
                // Strictly inside the radius (boundary points are excluded).
                out_.push_back(kdNodes[node_end].tuple);
            }

            if (node_end != node_start)
            {
                node_bro = kdNodes[node_end].brother;
                if (node_bro >= 0) // negative index marks a missing sibling
                {
                    // if intersect with plane, search another branch
                    dist_plane = distance_plane<dim>(point, kdNodes[node_end].parent);
                    if (dist_plane < radius2)
                    {
                        buffer.emplace(start_end(node_bro, _search(point, node_bro)));
                    }
                }
                // back trace
                node_end = kdNodes[node_end].parent;
            }
            else break;
        }
    }

    // sort
    std::sort(out_.begin(), out_.end());
}
template<int N>
struct WorkerRadius
{
static void work(TorchKDTree* tree_ptr, std::vector<std::vector<sint>>* vec_of_vec_ptr,
float* points_ptr, int numDimensions, int numQuery, float radius2)
{
auto& vec_of_vec = *vec_of_vec_ptr;
#pragma omp parallel for
for (sint i = 0; i < numQuery; ++i) tree_ptr->_search_radius<N>(points_ptr + i * numDimensions, radius2, vec_of_vec[i]);
}
};
// Radius search for a batch of query points.  Returns a flattened
// (indices, batch) tensor pair: `indices[k]` is a matched point index and
// `batch[k]` is the query row it belongs to (CSR-style layout).  CPU only.
std::tuple<torch::Tensor, torch::Tensor> TorchKDTree::search_radius(torch::Tensor points, float radius)
{
    CHECK_CONTIGUOUS(points);
    CHECK_FLOAT32(points);
    TORCH_CHECK(points.size(1) == numDimensions, "dimensions mismatch");
    sint numQuery = points.size(0);
    float* points_ptr = points.data_ptr<float>();
    torch::Tensor indices_tensor, batch_tensor;
    // Compare squared distances to avoid per-candidate sqrt.
    float radius2 = POW2(radius);

    if (is_cuda)
    {
        throw runtime_error("CUDA-KDTree cannot do searching");
    }
    else
    {
        // Per-query result lists, filled in parallel by WorkerRadius.
        auto vec_of_vec = std::vector<std::vector<sint>>(numQuery);
        Dispatcher<WorkerRadius>::dispatch(numDimensions,
                        this, &vec_of_vec, points_ptr, numDimensions, numQuery, radius2);

        // Prefix-sum the per-query counts to get each query's output offset.
        std::vector<sint> ind_size(numQuery);
        std::transform(vec_of_vec.begin(), vec_of_vec.end(), ind_size.begin(), [](const std::vector<sint>& vec){return sint(vec.size());});
        std::vector<sint> sum_ind_size(numQuery + 1); sum_ind_size[0] = 0;
        std::partial_sum(ind_size.begin(), ind_size.end(), sum_ind_size.begin() + 1, std::plus<sint>());
        sint sum_size = sum_ind_size.back();
        // Drop the total so sum_ind_size[i] is the start offset of query i.
        sum_ind_size.pop_back();

        indices_tensor = torch::zeros({sum_size}, torch::kInt64);
        int64_t* indices_tensor_ptr = indices_tensor.data_ptr<int64_t>();
        batch_tensor = torch::zeros({sum_size}, torch::kInt64);
        int64_t* batch_tensor_ptr = batch_tensor.data_ptr<int64_t>();

        // Scatter each query's results into its disjoint output slice.
        #pragma omp parallel for
        for (sint i = 0; i < numQuery; ++i)
        {
            sint length = vec_of_vec[i].size();
            sint start_index = sum_ind_size[i];
            std::copy(vec_of_vec[i].begin(), vec_of_vec[i].end(), indices_tensor_ptr + start_index); // index
            std::fill(batch_tensor_ptr + start_index, batch_tensor_ptr + start_index + length, i); // batch
        }
    }

    return std::make_tuple(indices_tensor, batch_tensor);
}
#endif
|
decoder.c | /*! @file
* @brief
*
* @version 1.0.0
*
* (C) Copyright 2017 GoPro Inc (http://gopro.com/).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "config.h"
#include "timing.h"
#if WARPSTUFF
#include "WarpLib.h"
#endif
//#include <stdlib.h>
#include <stddef.h>
#include <math.h>
#include <memory.h>
#include <time.h>
//#include <stdint.h>
#ifndef DEBUG
#define DEBUG (1 && _DEBUG)
#endif
#ifndef TIMING
#define TIMING (1 && _TIMING)
#endif
#ifndef XMMOPT
#define XMMOPT (1 && _XMMOPT)
#endif
#define GEN_LICENSE 0
#ifndef PI
#define PI 3.14159265359f
#endif
#ifdef _WIN32
#include <windows.h>
#elif __APPLE__
#include "macdefs.h"
#else
#ifndef ZeroMemory
#define ZeroMemory(p,s) memset(p,0,s)
#endif
#endif
#include <stdio.h>
#include <assert.h>
#ifdef __x86_64__
#include <emmintrin.h> // SSE2 intrinsics
#else
#include "sse2neon/sse2neon.h"
#endif
#include "dump.h"
#include "decoder.h"
#include "codec.h"
#include "vlc.h"
#include "codebooks.h" // References to the codebooks
#include "debug.h"
#include "color.h" // Color formats supported by image processing routines
#include "image.h"
#include "filter.h"
#include "spatial.h"
#include "temporal.h"
//#include "logo40x5.h"
#include "convert.h"
#include "wavelet.h"
#include "bitstream.h"
#include "frame.h"
#include "cpuid.h"
#include "bayer.h"
#include "metadata.h"
#include "DemoasicFrames.h" //TODO: Change filename to lower case
#include "swap.h"
#include "draw.h"
#include "RGB2YUV.h"
#include "lutpath.h"
#include "exception.h"
extern void FastVignetteInplaceWP13(DECODER *decoder, int displayWidth, int width, int height, int y, float r1, float r2, float gain,
int16_t *sptr, int resolution, int pixelsize);
extern void FastSharpeningBlurHinplaceWP13(int width, int16_t *sptr, float sharpness, int resolution, int pixelsize);
extern void FastSharpeningBlurVWP13(short *Aptr,
short *Bptr,
short *Cptr,
short *Dptr,
short *Eptr,
int pitch,
int edgenear,
short *output,
int pixels,
float sharpness,
int resolution,
int channel_blend_type);
extern void FastSharpeningBlurVW13A(short *Aptr,
short *Bptr,
short *Cptr,
short *Dptr,
short *Eptr,
int pitch,
int edgenear,
short *output,
int pixels,
float sharpness,
int resolution,
int channel_blend_type);
#ifdef SPI_LOADER
#include "spi.h"
#include "keyframes.h"
#endif
#ifndef DUMP
#define DUMP (0 && _DUMP)
#endif
#define ERROR_TOLERANT 1
#if defined(_WIN32) && DEBUG
#include <tchar.h> // For printing debug string in the console window
#endif
#define _DECODE_TRANSFORM 1 // Enable concurrent decoding and inverse transform
#define _TRANSFORM_FIELDPLUS 1 // Use the field plus transform
#if _SIF // In SIF resolution, enable the _DECODE_TRANSFORM switch
#if _DECODE_TRANSFORM == 0
#define _DECODE_TRANSFORM 1
#endif
#endif
#ifndef _FSMBUFFER
#define _FSMBUFFER 0
#endif
// Turn off saturation in this file
#ifdef SATURATE
#undef SATURATE
#endif
#define SATURATE(x) (assert(PIXEL_MIN <= (x) && (x) <= PIXEL_MAX), (x))
#define SATURATE8S(x) (assert(PIXEL8S_MIN <= (x) && (x) <= PIXEL8S_MAX), (x))
//#define SATURATE8S(x) SATURATE_8S(x)
//#define SATURATE(x) (x)
// Enable or disable function inlining
#if 1 //DEBUG
#define inline
#else
#define inline __forceinline
#endif
// Pixel size used for computing the compression ratio
#define BITS_PER_PIXEL 8
// Default processor capabilities
#define DEFAULT_FEATURES (_CPU_FEATURE_MMX )
#define DEMOSAIC_DELAYLINES 4
// Forward references
void AllocDecoderGroup(DECODER *decoder);
bool AllocDecoderBuffer(DECODER *decoder, int width, int height, int format);
void EraseDecoderFrames(DECODER *decoder);
TRANSFORM *AllocGroupTransform(GROUP *group, int channel);
void EraseOutputBuffer(uint8_t *buffer, int width, int height, int32_t pitch, int format);
#if _DEBUG
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, FILE *logfile);
#else
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch);
#endif
bool DecodeBandFSM16sNoGapHighByte(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant);
bool DecodeBandFSM16sNoGap2Pass(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant);
void CopyLowpassRGB444ToBuffer(DECODER *decoder, IMAGE *image_array[], int num_channels,
uint8_t *output_buffer, int32_t output_pitch,
FRAME_INFO *info, int chroma_offset,
int precision);
extern void Row16uQuarter2OutputFormat(DECODER *decoder, FRAME_INFO *info, int thread_index,
uint8_t *output, int pitch, int frame, void *scratch, size_t scratch_size, int threading,
uint8_t *channeldata[TRANSFORM_MAX_CHANNELS], // used in quarter res decodes
int channelpitch[TRANSFORM_MAX_CHANNELS]); // used in quarter res decodes);
//extern void ComputeCube(DECODER *decoder);
extern bool NeedCube(DECODER *decoder);
extern void LoadTweak();
//extern int g_topdown;
//extern int g_bottomup;
// Performance measurements
#if _TIMING
extern TIMER tk_decompress; // Timers
extern TIMER tk_decoding;
extern TIMER tk_convert;
extern TIMER tk_inverse;
extern COUNTER decode_byte_count; // Counters
extern COUNTER sample_byte_count;
extern COUNTER alloc_group_count;
extern COUNTER alloc_transform_count;
extern COUNTER alloc_buffer_count;
extern COUNTER spatial_decoding_count;
extern COUNTER temporal_decoding_count;
extern COUNTER progressive_decode_count;
#endif
#if 0
// Table that maps from decoded format to pixel size
static const int PixelSize[] =
{
0, // DECODED_FORMAT_UNSUPPORTED
2, // DECODED_FORMAT_YUYV
2, // DECODED_FORMAT_UYVY
2, // DECODED_FORMAT_420
4, // DECODED_FORMAT_RGB32
3, // DECODED_FORMAT_RGB24
2, // DECODED_FORMAT_RGB555
2, // DECODED_FORMAT_RGB565
#if 0
2, // DECODED_FORMAT_YUYV_INVERTED
2, // DECODED_FORMAT_UYVY_INVERTED
2, // DECODED_FORMAT_420_INVERTED
#endif
4, // DECODED_FORMAT_RGB32_INVERTED
3, // DECODED_FORMAT_RGB24_INVERTED
2, // DECODED_FORMAT_RGB555_INVERTED
2, // DECODED_FORMAT_RGB565_INVERTED
3, // DECODED_FORMAT_V210,
4, // DECODED_FORMAT_YU64, // Custom 16 bits per channel (all data scaled up) YUYV format.
4, // DECODED_FORMAT_YR16 // Rows of YUV with 16 bits per channel
};
#if _DEBUG
char *decoded_format_string[] =
{
"Unsupported",
"YUYV",
"UYUV",
"420",
"RGB32",
"RGB24",
"RGB555",
"RGB565",
#if 0
"YUYV Inverted",
"UYVY Inverted",
"420 Inverted",
#endif
//#if BUILD_PROSPECT
"RGB32 Inverted",
"RGB24 Inverted",
"RGB555 Inverted",
"RGB565 Inverted",
"V210"
//#endif
};
#endif
#else
// Bytes per pixel for each base color format, indexed by the COLOR_FORMAT_*
// value; zero marks unknown or planar formats without a fixed pixel size.
static const int pixel_size_table[] =
{
	0,			// COLOR_FORMAT_UNKNOWN
	2,			// COLOR_FORMAT_UYVY
	2,			// COLOR_FORMAT_YUYV
	2,			// COLOR_FORMAT_YVYU
	0,			// COLOR_FORMAT_YV12
	0,			// COLOR_FORMAT_I420
	2,			// COLOR_FORMAT_RGB16
	3,			// COLOR_FORMAT_RGB24
	4,			// COLOR_FORMAT_RGB32
	0,
	3,			// COLOR_FORMAT_V210
	0,			// COLOR_FORMAT_RGB10
	4,			// COLOR_FORMAT_YU64
	4,			// COLOR_FORMAT_YR16
	4,			// COLOR_FORMAT_YUVA
};

static const int pixel_size_table_length = sizeof(pixel_size_table)/sizeof(pixel_size_table[0]);

// Return the pixel size in bytes for the specified color format, or 0 when
// the format is unknown or has no fixed per-pixel byte size.
static int PixelSize(int format)
{
	int pixel_size = 0;

	// Mask off the other fields in the format descriptor

	// Use the lookup table to determine the pixel size (if possible)
	if (0 <= format && format < pixel_size_table_length)
	{
		pixel_size = pixel_size_table[format];
		//return pixel_size;
	}

	//TODO: Change the rest of this routine into one big switch statement

	// Is this an Avid format?
	else if (COLOR_FORMAT_AVID <= format && format <= COLOR_FORMAT_AVID_END)
	{
		switch (format)
		{
		case COLOR_FORMAT_CbYCrY_8bit:
		case COLOR_FORMAT_CbYCrY_10bit_2_8:		// Only valid for the lower plane
			pixel_size = 1;
			break;

		case COLOR_FORMAT_CbYCrY_16bit:
		case COLOR_FORMAT_CbYCrY_16bit_2_14:
		case COLOR_FORMAT_CbYCrY_16bit_10_6:
			pixel_size = 2;
			break;

		default:
			assert(0);
			pixel_size = 2;		// Assume 16 bits per pixel if the format is unknown
			break;
		}
	}
	// Is this a Bayer format?
	else if (COLOR_FORMAT_BAYER <= format && format <= COLOR_FORMAT_BAYER_END)
	{
		// Bayer codes appear to encode the component size as (format - 100),
		// clamped to 2 bytes -- TODO confirm this encoding against color.h
		pixel_size = (format - 100);
		if(pixel_size > 2)
			pixel_size = 2;
	}
	else if (format == COLOR_FORMAT_RG48)
		pixel_size = 6;
	else if (format == COLOR_FORMAT_RG64)
		pixel_size = 8;
	else if (format == COLOR_FORMAT_B64A) {
		pixel_size = 8;
	}

	return pixel_size;
}
#endif
// Return the size in bytes of one pixel for the specified decoded format.
// Asserts (in debug builds) and returns 0 for packed formats whose rows are
// not an integral number of bytes per pixel (V210, 10-bit 2-8) and for any
// unrecognized format.
int DecodedPixelSize(DECODED_FORMAT format)
{
	switch (format)
	{
	case DECODED_FORMAT_YUYV:
	case DECODED_FORMAT_CT_UCHAR:
		return 2;

	case DECODED_FORMAT_RGB32:
	case DECODED_FORMAT_CT_SHORT:
	case DECODED_FORMAT_CT_SHORT_2_14:
	case DECODED_FORMAT_CT_USHORT_10_6:
	case DECODED_FORMAT_ROW16U:
		return 4;

	case DECODED_FORMAT_RG48:
		return 6;

	case DECODED_FORMAT_CT_10Bit_2_8:
	case DECODED_FORMAT_V210:
		// This routine should not be called to compute the pixel sizes
		// for these packed formats
		assert(0);
		return 0;

	default:
		assert(0);
		return 0;
	}
}
#if 0
// Convert FOURCC code to a string
static void str4cc(char *string, uint32_t marker)
{
char *p = (char *)&marker + 3;
char *s = string;
int i;
for (i = 0; i < 4; i++)
*(s++) = *(p--);
*s = '\0';
}
#endif
/*
	GetDisplayAspectRatio() returns the display aspect ratio (w:h) for the
	decoded frame.  The decoded dimensions are first scaled back up to the
	encoded frame size, then either the embedded pixel-aspect information is
	used (reduced to lowest terms) or a reasonable ratio is guessed from the
	frame dimensions.

	@param decoder  decoder state (frame size, resolution, aspect info)
	@param w        receives the aspect-ratio numerator
	@param h        receives the aspect-ratio denominator
*/
void GetDisplayAspectRatio(DECODER *decoder, int *w, int *h)
{
	int origw,origh, guess = 0;

	origw = decoder->frame.width;
	origh = decoder->frame.height;

	// Undo the decode-time downscaling to recover the encoded frame size
	switch(decoder->frame.resolution)
	{
	case DECODED_RESOLUTION_FULL:
		break;
	case DECODED_RESOLUTION_HALF:
		origw *= 2;
		origh *= 2;
		break;
	case DECODED_RESOLUTION_QUARTER:
		origw *= 4;
		origh *= 4;
		break;
	case DECODED_RESOLUTION_LOWPASS_ONLY:
		origw *= 8;
		origh *= 8;
		break;
	case DECODED_RESOLUTION_FULL_DEBAYER:
		break;
	case DECODED_RESOLUTION_HALF_NODEBAYER:
		origw *= 2;
		origh *= 2;
		break;
	case DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED:
		origw *= 4;
		origh *= 4;
		break;
	case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
		//origw *= 2; //DAN20110129 -- seems the width has been corrected elsewhere or was never halved.
		break;
	case DECODED_RESOLUTION_HALF_HORIZONTAL:
		origw *= 2;
		break;
	case DECODED_RESOLUTION_HALF_VERTICAL:
		origh *= 2;
		break;
	}

	// Non-positive aspect information cannot be trusted
	if(decoder->codec.picture_aspect_x <= 0 || decoder->codec.picture_aspect_y <= 0)
		guess = 1;

	// if guess default values, we can't trust them
	if(decoder->codec.picture_aspect_x == 16 && decoder->codec.picture_aspect_y == 9)
		guess = 1;

	if(decoder->pixel_aspect_x && decoder->pixel_aspect_y)
	{
		int num,den,a,b;

		num = (origw * decoder->pixel_aspect_x) / decoder->pixel_aspect_y;
		den = origh;

		// Reduce the fraction to lowest terms using Euclid's algorithm
		// (replaces the former O(num+den) trial-division loop)
		a = num;
		b = den;
		while (b != 0)
		{
			int t = a % b;
			a = b;
			b = t;
		}
		if (a > 1)
		{
			num /= a;
			den /= a;
		}

		decoder->codec.picture_aspect_x = num;
		decoder->codec.picture_aspect_y = den;
		guess = 0;
	}

	if(guess)
	{
		// No trustworthy aspect metadata: infer from the frame dimensions
		if(origw > 720) //HD.
		{
			if(origh == 1080)
			{
				if(origw == 2048)
					*w=origw,*h=origh;
				else
					*w=16,*h=9; // assume 16x9
			}
			else if(origh == 720)
			{
				*w=16,*h=9; // assume 16x9
			}
			else
			{
				*w=origw,*h=origh; // assume square pixel.
			}
		}
		else
		{
			if(origh == 720)
			{
				*w=16,*h=9; // assume 16x9
			}
			else
			{
				*w=origw,*h=origh; // assume square pixel.
			}
		}
	}
	else
	{
		*w=decoder->codec.picture_aspect_x;
		*h=decoder->codec.picture_aspect_y;
	}
}
// Return true if the resolution code is one the decoder can produce
bool IsValidFrameResolution(int resolution)
{
    return (resolution == DECODED_RESOLUTION_FULL ||
            resolution == DECODED_RESOLUTION_HALF ||
            resolution == DECODED_RESOLUTION_QUARTER ||
            resolution == DECODED_RESOLUTION_LOWPASS_ONLY ||
            resolution == DECODED_RESOLUTION_HALF_HORIZONTAL ||
            resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER);
}
// Return true if this decoder can decode to quarter resolution
// (the capability is unconditional in this build)
bool IsQuarterResolutionEnabled(DECODER *decoder)
{
    (void)decoder;  // the decoder state does not affect the answer
    return true;
}
// Return the size in bytes of the DECODER structure so that callers can
// allocate decoder state without access to the structure definition.
size_t DecoderSize()
{
    return sizeof(DECODER);
}
// Initialize a decoder instance.
//
// Clears the entire DECODER structure (preserving the thread-control settings
// when they were explicitly configured by the caller), records the logfile,
// and installs the codebooks used for decoding.
//
// @param decoder  Decoder state to initialize (must point to valid storage)
// @param logfile  Destination for status output during decoding (may be NULL)
// @param cs       Array of CODEC_NUM_CODESETS codesets, or NULL for defaults
void InitDecoder(DECODER *decoder, FILE *logfile, CODESET *cs)
{
#if (0 && DEBUG)
    if (logfile) {
        fprintf(logfile, "InitDecoder, decoder: 0x%p\n", decoder);
    }
#endif
    {
        //TODO: Clear the decoder before setting the CPU limit and affinity
        int i;
        //int thread_limit=0, thread_affinity=0, set_thread_params=0, capabilities=0;
        //save key params
        // NOTE(review): this reads decoder->thread_cntrl before the memset
        // below; the caller must have zeroed or initialized the structure
        // before the first call or this copies indeterminate data -- confirm.
        Thread_cntrl saved_params = decoder->thread_cntrl;
        // Clear everything
        memset(decoder, 0, sizeof(DECODER));
        //restore key params
        if(saved_params.set_thread_params == 1) // used by the DShow Interface
        {
            decoder->thread_cntrl = saved_params;
        }
#if _TIMING
        InitTiming();
#endif
        // Set the file for status information during decoding
        decoder->logfile = logfile;
        // Initialize the decoding error to no error
        decoder->error = CODEC_ERROR_OKAY;
        // Most recent marker found during decoding
        decoder->marker = 0;
        // Count of frames decoded
        decoder->frame_count = 0;
        // Set the codebooks that will be used for decoding
        if (cs != NULL)
        {
            // Use the codeset provided in the call
            for(i=0; i<CODEC_NUM_CODESETS; i++)
            {
                // Codebook for decoding highpass coefficients
                decoder->magsbook[i] = cs[i].magsbook;
                // Codebook for decoding runs of coefficients
                decoder->runsbook[i] = cs[i].runsbook;
                // Lookup table for fast codebook search
                decoder->fastbook[i] = cs[i].fastbook;
            }
        }
        else
        {
            // Use the default codeset
            // (note: only codeset slot 0 is populated in this branch)
            decoder->magsbook[0] = cs9.magsbook;
            decoder->runsbook[0] = cs9.runsbook;
            decoder->fastbook[0] = cs9.fastbook;
        }
        // Initialize the codec state
        InitCodecState(&decoder->codec);
        InitScratchBuffer(&decoder->scratch, NULL, 0);
#if _DUMP
        // Initialize the descriptor for controlling debug output
        decoder->dump.enabled = false;
        decoder->dump.channel_mask = 0;
        decoder->dump.wavelet_mask = 0;
        memset(decoder->dump.directory, 0, sizeof(decoder->dump.directory));
        memset(decoder->dump.filename, 0, sizeof(decoder->dump.filename));
#endif
    }
    //REDTEST
    decoder->frm = 0;
    decoder->run = 1;
#if _ALLOCATOR
    decoder->allocator = NULL;
#endif
    // Mark the decoder initialized so ClearDecoder knows there is state to free
    decoder->initialized = 1; //DAN20060912
}
// Install the license key into the decoder, but only if no license
// has been set already (an all-zero key means "not licensed yet").
void InitDecoderLicense(DECODER *decoder, const unsigned char *licensekey)
{
    const unsigned char blank[16] = {0};
    if (decoder == NULL || licensekey == NULL)
        return;
    // A previously installed license is never overwritten
    if (memcmp(decoder->licensekey, blank, sizeof(decoder->licensekey)) != 0)
        return;
    memcpy(decoder->licensekey, licensekey, sizeof(decoder->licensekey));
}
// Free data allocated within the decoder
//
// Releases every buffer, lookup table, transform, thread pool, and metadata
// database owned by the decoder and resets the corresponding pointer and size
// fields.  Safe to call on a decoder that was never initialized (returns
// immediately).  Does not free the DECODER structure itself.
void ClearDecoder(DECODER *decoder)
{
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
#if _ALLOCATOR
    ALLOCATOR *allocator = decoder->allocator;
#endif
    // Free the transforms allocated in the decoder
    int i;
    if(decoder->initialized == 0)
        return; // nothing to free //DAN20060912
#if _GRAPHICS
    DrawClose(decoder);
#endif
    // Release the metadata databases
    for(i=0; i<=METADATA_PRIORITY_MAX; i++)
    {
        if(decoder->DataBases[i])
        {
#if _ALLOCATOR
            Free(decoder->allocator, decoder->DataBases[i]);
#else
            MEMORY_FREE(decoder->DataBases[i]);
#endif
            decoder->DataBases[i] = NULL;
            decoder->DataBasesSize[i] = 0;
            decoder->DataBasesAllocSize[i] = 0;
        }
    }
    if(decoder->sqrttable)
    {
#if _ALLOCATOR
        Free(decoder->allocator, decoder->sqrttable);
#else
        MEMORY_FREE(decoder->sqrttable);
#endif
        decoder->sqrttable = NULL;
    }
    // Release the wavelet transforms for all channels
    for (i = 0; i < TRANSFORM_MAX_CHANNELS; i++)
    {
#if _ALLOCATOR
        FreeTransform(allocator, decoder->transform[i]);
#else
        FreeTransform(decoder->transform[i]);
#endif
        decoder->transform[i] = NULL;
    }
    if(decoder->aligned_sample_buffer)
    {
#if _ALLOCATOR
        FreeAligned(decoder->allocator, decoder->aligned_sample_buffer);
#else
        MEMORY_ALIGNED_FREE(decoder->aligned_sample_buffer);
#endif
        decoder->aligned_sample_buffer = NULL;
        decoder->aligned_sample_buffer_size = 0;
    }
    if(decoder->tools)
    {
#if _ALLOCATOR
        Free(decoder->allocator, decoder->tools);
#else
        MEMORY_FREE(decoder->tools);
#endif
        decoder->tools = NULL;
    }
    // Free the buffer allocated for decoding
    if (decoder->buffer != NULL)
    {
#if DEBUG_BUFFER_USAGE
        int i;
        char *ptr = (char *)decoder->buffer;
        FILE *fp = fopen("C:/free.txt", "a");
        fprintf(fp, "decoder->buffer = %08x  buffer_size = %d\n", decoder->buffer ,decoder->buffer_size);
        i = decoder->buffer_size-1;
        while(ptr[i] == 1) i--;
        fprintf(fp, "used %2.3f percent\n", 100.0*(float)i/(float)decoder->buffer_size);
        fclose(fp);
#endif
#if _ALLOCATOR
        FreeAligned(allocator, decoder->buffer);
#else
        MEMORY_ALIGNED_FREE(decoder->buffer);
#endif
        decoder->buffer = NULL;
        decoder->buffer_size = 0;
        // Clear the fields in the scratch buffer descriptor
        memset(&decoder->scratch, 0, sizeof(SCRATCH));
        // Eventually the buffer and buffer size fields will be obsolete
    }
    // Free the per-thread scratch buffers
    for(i=0;i<_MAX_CPUS;i++)
    {
        if(decoder->threads_buffer[i])
        {
#if _ALLOCATOR
            FreeAligned(decoder->allocator, decoder->threads_buffer[i]);
#else
            MEMORY_ALIGNED_FREE(decoder->threads_buffer[i]);
#endif
            decoder->threads_buffer[i] = NULL;
        }
    }
    decoder->threads_buffer_size = 0;
    // Do not attempt to free the codebooks since the
    // codebook pointers are references to static tables
    // Can free some of the data structures allocated by the decoder
    FreeCodebooks(decoder);
#if _INTERLACED_WORKER_THREADS
    if(decoder->interlaced_worker.lock_init) // threads started
    {
        int i;
        // Signal this thread to stop
        SetEvent(decoder->interlaced_worker.stop_event);
        // Free all handles used by the worker threads
        for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
        {
            WaitForSingleObject(decoder->interlaced_worker.handle[i], INFINITE); //JY20080307
            CloseHandle(decoder->interlaced_worker.handle[i]);
            CloseHandle(decoder->interlaced_worker.start_event[i]);
            CloseHandle(decoder->interlaced_worker.done_event[i]);
        }
        CloseHandle(decoder->interlaced_worker.row_semaphore);
        CloseHandle(decoder->interlaced_worker.stop_event);
        for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
        {
            decoder->interlaced_worker.handle[i] = 0;
            decoder->interlaced_worker.start_event[i] = 0;
            decoder->interlaced_worker.done_event[i] = 0;
        }
        decoder->interlaced_worker.row_semaphore = 0;
        decoder->interlaced_worker.stop_event = 0;
    }
    // Free the critical section used by the worker threads
    DeleteCriticalSection(&decoder->interlaced_worker.lock);
    decoder->interlaced_worker.lock_init = 0;
#endif
#if _THREADED
    // Shut down the worker thread pools
    if(decoder->entropy_worker_new.pool.thread_count)
    {
        ThreadPoolDelete(&decoder->entropy_worker_new.pool);
        DeleteLock(&decoder->entropy_worker_new.lock);
    }
    if(decoder->worker_thread.pool.thread_count)
    {
        ThreadPoolDelete(&decoder->worker_thread.pool);
        DeleteLock(&decoder->worker_thread.lock);
    }
    if(decoder->draw_thread.pool.thread_count)
    {
        ThreadPoolDelete(&decoder->draw_thread.pool);
        DeleteLock(&decoder->draw_thread.lock);
    }
    /*
    if(decoder->qt_convert_worker.pool.thread_count)
    {
        ThreadPoolDelete(&decoder->qt_convert_worker.pool);
        DeleteLock(&decoder->qt_convert_worker.lock);
    }
    if(decoder->qt_scale_worker.pool.thread_count)
    {
        ThreadPoolDelete(&decoder->qt_scale_worker.pool);
        DeleteLock(&decoder->qt_scale_worker.lock);
    }
    */
    // Recursively clear and free the parallel (second) decoder
    if(decoder->parallelDecoder)
    {
        if(decoder->parallelDecoder->decoder_thread.pool.thread_count)
        {
            ThreadPoolDelete(&decoder->parallelDecoder->decoder_thread.pool);
            DeleteLock(&decoder->parallelDecoder->decoder_thread.lock);
            decoder->parallelDecoder->decoder_thread.pool.thread_count = 0;
        }
        ClearDecoder(decoder->parallelDecoder);
#if _ALLOCATOR
        Free(decoder->allocator, decoder->parallelDecoder);
#else
        MEMORY_FREE(decoder->parallelDecoder);
#endif
        decoder->parallelDecoder = NULL;
    }
#endif
    //MEMORY_ALIGNED_FREE(RawBayer16);
#if _ALLOCATOR
    if(decoder->RGBFilterBuffer16)
    {
        FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
        decoder->RGBFilterBuffer16 = 0;
        decoder->RGBFilterBufferSize = 0;
    }
    if(decoder->RawBayer16)
    {
        FreeAligned(decoder->allocator, decoder->RawBayer16);
        decoder->RawBayer16 = 0;
        decoder->RawBayerSize = 0;
    }
    if(decoder->StereoBuffer)
    {
        FreeAligned(decoder->allocator, decoder->StereoBuffer);
        decoder->StereoBuffer = 0;
        decoder->StereoBufferSize = 0;
    }
    if(decoder->RawCube)
    {
        FreeAligned(decoder->allocator, decoder->RawCube);
        decoder->RawCube = 0;
    }
    if(decoder->Curve2Linear)
    {
        FreeAligned(decoder->allocator, decoder->Curve2Linear);
        decoder->Curve2Linear = 0;
    }
    if(decoder->Linear2CurveRed)
    {
        FreeAligned(decoder->allocator, decoder->Linear2CurveRed);
        decoder->Linear2CurveRed = NULL;
    }
    if(decoder->Linear2CurveGrn)
    {
        FreeAligned(decoder->allocator, decoder->Linear2CurveGrn);
        decoder->Linear2CurveGrn = NULL;
    }
    if(decoder->Linear2CurveBlu)
    {
        FreeAligned(decoder->allocator, decoder->Linear2CurveBlu);
        decoder->Linear2CurveBlu = NULL;
    }
    if(decoder->BYR4LinearRestore)
    {
        FreeAligned(decoder->allocator, decoder->BYR4LinearRestore);
        decoder->BYR4LinearRestore = NULL;
    }
    if(decoder->GammaContrastRed)
    {
        FreeAligned(decoder->allocator, decoder->GammaContrastRed);
        decoder->GammaContrastRed = NULL;
    }
    if(decoder->GammaContrastGrn)
    {
        FreeAligned(decoder->allocator, decoder->GammaContrastGrn);
        decoder->GammaContrastGrn = NULL;
    }
    if(decoder->GammaContrastBlu)
    {
        FreeAligned(decoder->allocator, decoder->GammaContrastBlu);
        decoder->GammaContrastBlu = NULL;
    }
    //3d LUT
    {
        if(decoder->LUTcache)
            Free(decoder->allocator, decoder->LUTcache);
        decoder->LUTcache = NULL;
        decoder->LUTcacheCRC = 0;
    }
#if WARPSTUFF
    {
        if (decoder->lens_correct_buffer)
#if _ALLOCATOR
            Free(decoder->allocator, decoder->lens_correct_buffer);
#else
            MEMORY_ALIGNED_FREE(decoder->lens_correct_buffer);
#endif
        if (decoder->mesh)
            geomesh_destroy(decoder->mesh);
        decoder->lastLensOffsetX = 0;
        decoder->lastLensOffsetY = 0;
        decoder->lastLensOffsetZ = 0;
        decoder->lastLensOffsetR = 0;
        decoder->lastLensZoom = 0;
        decoder->lastLensFishFOV = 0;
        decoder->lastLensGoPro = 0;
        decoder->lastLensSphere = 0;
        decoder->lastLensFill = 0;
        decoder->lastLensStyleSel = 0;
        memset(decoder->lastLensCustomSRC, 0, sizeof(decoder->lastLensCustomSRC));
        memset(decoder->lastLensCustomDST, 0, sizeof(decoder->lastLensCustomDST));
        decoder->mesh = NULL;
        decoder->lens_correct_buffer = NULL;
    }
#endif
    if(decoder->overrideData)
    {
        Free(decoder->allocator, decoder->overrideData);
        decoder->overrideData = NULL;
        decoder->overrideSize = 0;
    }
    for(i=0; i<64; i++)
    {
        if(decoder->mdc[i])
            Free(decoder->allocator, decoder->mdc[i]);
        decoder->mdc[i] = NULL;
        decoder->mdc_size[i] = 0;
    }
#else
    if(decoder->RGBFilterBuffer16)
    {
        MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
        decoder->RGBFilterBuffer16 = NULL;
        decoder->RGBFilterBufferSize = 0;  // keep size fields consistent with the allocator branch
    }
    if(decoder->RawBayer16)
    {
        MEMORY_ALIGNED_FREE(decoder->RawBayer16);
        decoder->RawBayer16 = NULL;
        decoder->RawBayerSize = 0;  // keep size fields consistent with the allocator branch
    }
    if(decoder->StereoBuffer)
    {
        MEMORY_ALIGNED_FREE(decoder->StereoBuffer);
        decoder->StereoBuffer = NULL;
        decoder->StereoBufferSize = 0;
    }
    if(decoder->RawCube)
    {
        MEMORY_ALIGNED_FREE(decoder->RawCube);
        decoder->RawCube = NULL;
    }
    if(decoder->Curve2Linear)
    {
        MEMORY_ALIGNED_FREE(decoder->Curve2Linear);
        decoder->Curve2Linear = NULL;
    }
    if(decoder->BYR4LinearRestore)
    {
        MEMORY_ALIGNED_FREE(decoder->BYR4LinearRestore);
        decoder->BYR4LinearRestore = NULL;
    }
    if(decoder->Linear2CurveRed)
    {
        MEMORY_ALIGNED_FREE(decoder->Linear2CurveRed);
        decoder->Linear2CurveRed = NULL;
    }
    if(decoder->Linear2CurveGrn)
    {
        MEMORY_ALIGNED_FREE(decoder->Linear2CurveGrn);
        decoder->Linear2CurveGrn = NULL;
    }
    if(decoder->Linear2CurveBlu)
    {
        MEMORY_ALIGNED_FREE(decoder->Linear2CurveBlu);
        decoder->Linear2CurveBlu = NULL;
    }
    if(decoder->GammaContrastRed)
    {
        MEMORY_ALIGNED_FREE(decoder->GammaContrastRed);
        decoder->GammaContrastRed = NULL;
    }
    if(decoder->GammaContrastGrn)
    {
        MEMORY_ALIGNED_FREE(decoder->GammaContrastGrn);
        decoder->GammaContrastGrn = NULL;
    }
    if(decoder->GammaContrastBlu)
    {
        MEMORY_ALIGNED_FREE(decoder->GammaContrastBlu);
        decoder->GammaContrastBlu = NULL;
    }
    //3d LUT
    {
        if(decoder->LUTcache)
            MEMORY_FREE(decoder->LUTcache);
        decoder->LUTcache = NULL;
        decoder->LUTcacheCRC = 0;
    }
#if WARPSTUFF
    {
        if (decoder->lens_correct_buffer)
#if _ALLOCATOR
            Free(decoder->allocator, decoder->lens_correct_buffer);
#else
            MEMORY_ALIGNED_FREE(decoder->lens_correct_buffer);
#endif
        if (decoder->mesh)
            geomesh_destroy(decoder->mesh);  // BUG FIX: was geomesh_destroy(mesh) -- undeclared identifier
        decoder->mesh = NULL;
        decoder->lens_correct_buffer = NULL;
        decoder->lastLensOffsetX = 0;
        decoder->lastLensOffsetY = 0;
        decoder->lastLensOffsetZ = 0;
        decoder->lastLensOffsetR = 0;
        decoder->lastLensZoom = 0;
        decoder->lastLensFishFOV = 0;
        decoder->lastLensGoPro = 0;     // BUG FIX: was misspelled lastLlensGoPro
        decoder->lastLensSphere = 0;    // BUG FIX: was misspelled lastLlensSphere
        decoder->lastLensFill = 0;      // BUG FIX: was misspelled lastLlensFill
        decoder->lastLensStyleSel = 0;  // BUG FIX: was misspelled lastLlensStyleSel
        memset(decoder->lastLensCustomSRC, 0, sizeof(decoder->lastLensCustomSRC));
        memset(decoder->lastLensCustomDST, 0, sizeof(decoder->lastLensCustomDST));
    }
#endif
    if(decoder->overrideData)
    {
        MEMORY_FREE(decoder->overrideData);
        decoder->overrideData = NULL;
        decoder->overrideSize = 0;
    }
    for(i=0; i<64; i++)
    {
        if(decoder->mdc[i])
            MEMORY_FREE(decoder->mdc[i]);
        decoder->mdc[i] = NULL;
        decoder->mdc_size[i] = 0;
    }
#endif
#ifdef SPI_LOADER
    SPIReleaseAll(decoder);
    //KeyframesReleaseAll(decoder);
#endif
    decoder->initialized = 0;// cleared
}
// Shut down the decoder, releasing everything it allocated.
// The DECODER structure itself is owned by the caller and is not freed.
void ExitDecoder(DECODER *decoder)
{
    // Let the caller keep the logfile open or choose to close it
    //if (logfile) fclose(logfile);
    // Free data allocated within the decoder
    ClearDecoder(decoder);
}
// Allocate the data structures for decoding a group
//
// Ensures that every channel slot in the decoder has a zero-initialized
// TRANSFORM.  On allocation failure the decoder error code is set and the
// routine returns early.
void AllocDecoderGroup(DECODER *decoder)
{
#if _ALLOCATOR
    ALLOCATOR *allocator = decoder->allocator;
#endif
    int channel;
    assert(decoder->codec.num_channels <= TRANSFORM_MAX_CHANNELS); //DAN07022004
    for (channel = 0; channel < TRANSFORM_MAX_CHANNELS; channel++) //DAN07022004
    {
        TRANSFORM *transform;
        // Skip channels that already have a transform data structure
        if (decoder->transform[channel] != NULL)
            continue;
#if _ALLOCATOR
        transform = (TRANSFORM *)Alloc(allocator, sizeof(TRANSFORM));
#else
        transform = (TRANSFORM *)MEMORY_ALLOC(sizeof(TRANSFORM));
#endif
        assert(transform != NULL);
        if (transform == NULL) {
            decoder->error = CODEC_ERROR_TRANSFORM_MEMORY;
            return;
        }
        memset(transform, 0, sizeof(TRANSFORM));
        decoder->transform[channel] = transform;
#if _TIMING
        alloc_transform_count++;
#endif
    }
}
// Allocate the buffer used for intermediate results during decoding
//
// Computes a scratch-buffer size from the frame width and decoded output
// format, allocates (or reuses) the cache-aligned decoding buffer, then
// allocates one aligned scratch buffer per worker thread.
//
// @return true on success, false if any allocation fails
bool AllocDecoderBuffer(DECODER *decoder, int width, int height, int format)
{
    int cpus;
    size_t size;
    size_t row_size;
    char *buffer;
#if 0
    // Allocate a buffer large enough for six rows of cache lines
    size = width * sizeof(PIXEL);
    size = ALIGN(size, _CACHE_LINE_SIZE);
    size = 2 * TRANSFORM_MAX_CHANNELS * size;
#else
    // Allocate a buffer large enough for nine rows of cache lines
    size = width * sizeof(PIXEL) * 4;
    size = ALIGN(size, _CACHE_LINE_SIZE);
    size = 3 * TRANSFORM_MAX_CHANNELS * size;
#endif
    // Widen the buffer for output formats that need more bytes per pixel row
    switch (format)
    {
    case DECODED_FORMAT_V210:
    case DECODED_FORMAT_YU64:
        // Increase the buffer size for decoding to the V210 format
        row_size = 4 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 4 * 2 * row_size;
        break;
    case DECODED_FORMAT_YR16:
    case DECODED_FORMAT_CbYCrY_10bit_2_8:
    case DECODED_FORMAT_CbYCrY_16bit_2_14:
    case DECODED_FORMAT_CbYCrY_16bit_10_6:
        // Increase the buffer size for decoding to the YUV16 format
        row_size = 4 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 8 * 2 * row_size;
        break;
    case DECODED_FORMAT_RG48:
    case DECODED_FORMAT_WP13:
        // Increase the buffer size for decoding to the RGB48 format
        row_size = 6 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 12 * 2 * row_size;
        break;
    case DECODED_FORMAT_RG64:
        // Increase the buffer size for decoding to the RGBA64 format
        row_size = 8 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 16 * 2 * row_size;
        break;
    case DECODED_FORMAT_BYR3:
        // Increase the buffer size for decoding to the BYR3 format
        row_size = 2 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 4 * 2 * row_size;
        break;
    case DECODED_FORMAT_BYR4:
        // Increase the buffer size for decoding to the BYR4 format
        row_size = 2 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 4 * 2 * row_size;
        break;
    case DECODED_FORMAT_B64A:
    case DECODED_FORMAT_W13A:
        // Increase the buffer size for decoding to the B64A format
        row_size = 8 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 16 * 2 * row_size;
        break;
    default:
        // Increase the buffer size for YUV to RGB conversion
        row_size = 3 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 2 * 2 * row_size;
        break;
    }
    // The CPU count is carried in the high 16 bits of the capabilities word
    cpus = decoder->thread_cntrl.capabilities >> 16;
    if(cpus > 4)
        size *= 4;
    if(cpus > 16) //DAN20120803 -- 4444 clips
        size *= 2;
    // Has a buffer already been allocated?
    if (decoder->buffer != NULL)
    {
        // Is the buffer large enough?
        if (decoder->buffer_size < size)
        {
            // Free the previous buffer
#if _ALLOCATOR
            FreeAligned(decoder->allocator, decoder->buffer);
#else
            MEMORY_ALIGNED_FREE(decoder->buffer);
#endif
            decoder->buffer = NULL;
            decoder->buffer_size = 0;
        }
        else
        {
            // NOTE(review): this early return also skips the per-thread
            // buffer sizing below; if the thread-buffer size requirement
            // can grow while the main buffer does not, the thread buffers
            // would stay undersized -- confirm the sizing relationship.
            return true;
        }
    }
    buffer = decoder->buffer;
    if(buffer == NULL)
    {
        // Allocate the decoding buffer
#if _ALLOCATOR
        buffer = (char *)AllocAligned(decoder->allocator, size, _CACHE_LINE_SIZE);
#else
        buffer = (char *)MEMORY_ALIGNED_ALLOC(size, _CACHE_LINE_SIZE);
#endif
        if(buffer == NULL)
        {
            return false;
        }
    }
#if DEBUG_BUFFER_USAGE
    memset(buffer, 1, size);
#endif
    // Save the buffer and its size in the decoder
    decoder->buffer = buffer;
    decoder->buffer_size = size;
    // Initialize the scratch space descriptor
    InitScratchBuffer(&decoder->scratch, buffer, size);
    // allocate buffer for each debayer/color formating thread
    {
        int i;
        size = (width+16)*3*2*4*2*4;// sixteen lines
        if(height*4 > width*3) //square or tall images where running out of scratch space for zooms.
            size *= 1 + ((height+(width/2))/width);
        // Grow the per-thread buffers by discarding the old (smaller) ones
        if (decoder->threads_buffer_size < size)
        {
            for(i=0;i<_MAX_CPUS;i++)
            {
                if(decoder->threads_buffer[i])
                {
#if _ALLOCATOR
                    FreeAligned(decoder->allocator, decoder->threads_buffer[i]);
#else
                    MEMORY_ALIGNED_FREE(decoder->threads_buffer[i]);
#endif
                    decoder->threads_buffer[i] = NULL;
                }
            }
            decoder->threads_buffer_size = 0;
        }
        for(i=0;i<cpus;i++)
        {
            if(decoder->threads_buffer[i] == NULL)
            {
#if _ALLOCATOR
                decoder->threads_buffer[i] = (char *)AllocAligned(decoder->allocator, size, _CACHE_LINE_SIZE);
#else
                decoder->threads_buffer[i] = (char *)MEMORY_ALIGNED_ALLOC(size, _CACHE_LINE_SIZE);
#endif
                if(decoder->threads_buffer[i] == NULL)
                {
                    return false;
                }
            }
        }
        decoder->threads_buffer_size = size;
    }
    // Eventually the scratch space descriptor will replace the buffer and buffer_size fields
    return true;
}
// Resize the intermediate decoding buffer for new frame dimensions.
// Delegates to AllocDecoderBuffer, which is a no-op when the existing
// buffer is already large enough.
bool ResizeDecoderBuffer(DECODER *decoder, int width, int height, int format)
{
    // The dimensions must be positive
    assert(width > 0);
    assert(height > 0);
    return AllocDecoderBuffer(decoder, width, height, format);
}
// Reset the per-band status flags on every wavelet in every channel
// so the next decode starts with a clean slate.
void ClearTransformFlags(DECODER *decoder)
{
    int channel;
    for (channel = 0; channel < TRANSFORM_MAX_CHANNELS; channel++)
    {
        TRANSFORM *transform = decoder->transform[channel];
        int k;
        // Channels are filled in order, so stop at the first empty slot
        if (transform == NULL)
            break;
        for (k = 0; k < TRANSFORM_MAX_WAVELETS; k++)
        {
            IMAGE *wavelet = transform->wavelet[k];
            if (wavelet == NULL)
                continue;
            wavelet->band_valid_flags = 0;
            wavelet->band_started_flags = 0;
        }
    }
}
// Initialize the tables for decoding the wavelet transforms
//
// Zeroes the decoder's subband index tables, then copies the caller's
// subband-to-wavelet and subband-to-band mappings into them.  The count is
// clamped to the capacity of the destination tables so the memcpy cannot
// overrun the decoder state (the original code trusted num_subbands blindly).
void InitWaveletDecoding(DECODER *decoder, int subband_wavelet_index[], int subband_band_index[], int num_subbands)
{
    // Capacities of the destination tables (in entries)
    int max_wavelet_entries = (int)(sizeof(decoder->subband_wavelet_index) / sizeof(decoder->subband_wavelet_index[0]));
    int max_band_entries = (int)(sizeof(decoder->subband_band_index) / sizeof(decoder->subband_band_index[0]));
    int max_subbands = (max_wavelet_entries < max_band_entries) ? max_wavelet_entries : max_band_entries;
    size_t subband_table_size;
    // Clamp the count so neither memcpy can overrun its destination
    if (num_subbands < 0)
        num_subbands = 0;
    if (num_subbands > max_subbands)
        num_subbands = max_subbands;
    subband_table_size = num_subbands * sizeof(int);
    memset(decoder->subband_wavelet_index, 0, sizeof(decoder->subband_wavelet_index));
    memcpy(decoder->subband_wavelet_index, subband_wavelet_index, subband_table_size);
    memset(decoder->subband_band_index, 0, sizeof(decoder->subband_band_index));
    memcpy(decoder->subband_band_index, subband_band_index, subband_table_size);
}
#if 0
// Return true if the color format can be produced by the decoder.
// (Currently compiled out; retained for reference.)
static bool IsValidFormat(int format)
{
    bool valid_format = true;
    //TODO: Change this routine into a switch statement
    if(format == COLOR_FORMAT_BYR5)
        return true; // can decode to BYR5
    if(format == COLOR_FORMAT_BYR4)
        return true; // can decode to BYR4
    if(format == COLOR_FORMAT_BYR3)
        return true; // can decode to BYR3
    if(format == COLOR_FORMAT_BYR2)
        return true; // can decode to BYR2
    if(format == COLOR_FORMAT_RG48)
        return true; // can decode to RGB48
    if(format == COLOR_FORMAT_RG64)
        return true; // can decode to RGBA64
    if (format == COLOR_FORMAT_B64A)
    {
        return true; // Can decode to B64A
    }
    // Otherwise the format is valid if it falls in the known range
    if (!(COLOR_FORMAT_UNKNOWN < format && format <= MAX_DECODED_COLOR_FORMAT)) {
        valid_format = false;
    }
    return valid_format;
}
#endif
#if _INTERLACED_WORKER_THREADS
// Create the Win32 events, semaphore, critical section, and worker threads
// used for the interlaced last-wavelet pass.  Idempotent: does nothing when
// the worker state has already been initialized (lock_init is set).
void StartInterlaceWorkerThreads(DECODER *decoder)
{
    int i;
    if(decoder->interlaced_worker.lock_init == 0)
    {
        // Create events for starting the worker threads
        // (auto-reset, initially unsignaled)
        for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
        {
            decoder->interlaced_worker.start_event[i] = CreateEvent(NULL, false, false, NULL);
        }
        // Create a semaphore to signal the worker threads to process rows
        decoder->interlaced_worker.row_semaphore = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
        // Create an event for each worker thread to signal that it has finished
        for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
        {
            decoder->interlaced_worker.done_event[i] = CreateEvent(NULL, false, false, NULL);
        }
        // Create an event for forcing the worker threads to terminate
        // (manual-reset so one SetEvent releases all waiting threads)
        decoder->interlaced_worker.stop_event = CreateEvent(NULL, true, false, NULL);
        // Zero the count of worker threads that are active
        decoder->interlaced_worker.thread_count = 0;
        // Initialize the lock for controlling access to the worker thread data
        InitializeCriticalSection(&decoder->interlaced_worker.lock);
        decoder->interlaced_worker.lock_init = 1;
        // Start the worker threads after all synchronization objects exist
        for (i = 0; i < THREADS_IN_LAST_WAVELET; i++)
        {
            decoder->interlaced_worker.id[i] = 0;
            decoder->interlaced_worker.handle[i] = CreateThread(NULL, 0, InterlacedWorkerThreadProc, decoder, 0, &decoder->interlaced_worker.id[i]);
            assert(decoder->interlaced_worker.handle[i] != NULL);
        }
    }
}
#endif
#if 0
// Deliberately divide by the argument to exercise the exception handler
// (pass zero to trigger a divide-by-zero).  Compiled out; debugging aid.
int TestException(int x)
{
    // volatile prevents the compiler from folding away the division
    static volatile int y1 = 100;
    volatile int x1 = x;
    return y1 / x1;
}
#endif
// Process device driver request to initialize the decoder
//
// Performs the full one-time setup for a decoder instance: installs the
// codesets, queries CPU capabilities, scales the dimensions for reduced
// resolution decodes, initializes the codebooks and finite state machines,
// allocates the transforms and scratch buffers, and (optionally) starts the
// worker thread pools.  Returns false if any allocation or codebook
// initialization fails.
#if _ALLOCATOR
bool DecodeInit(ALLOCATOR *allocator, DECODER *decoder, int width, int height, int format, int resolution, FILE *logfile)
#else
bool DecodeInit(DECODER *decoder, int width, int height, int format, int resolution, FILE *logfile)
#endif
{
    CODESET codesets[CODEC_NUM_CODESETS];
    int i;
    int cpus;
    //int x = 0;
    // Copy the static codesets into a local working array
#if CODEC_NUM_CODESETS == 3
    memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
    memcpy(&codesets[1], &SECOND_CODESET, sizeof(CODESET));
    memcpy(&codesets[2], &THIRD_CODESET, sizeof(CODESET));
#elif CODEC_NUM_CODESETS == 2
    memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
    memcpy(&codesets[1], &SECOND_CODESET, sizeof(CODESET));
#else
    memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
#endif
#ifdef _WIN32
    // Set the handler for system exceptions
    SetDefaultExceptionHandler();
#endif
    //TestException(x);
    // Clear all decoder fields except the logfile and set the codebooks for decoding
    InitDecoder(decoder, logfile, &codesets[0]);
#if _ALLOCATOR
    decoder->allocator = allocator;
#endif
    if(decoder->thread_cntrl.capabilities == 0)
    {
        // Determine the processor capabilities
        SetDecoderCapabilities(decoder);
    }
    // The CPU count is carried in the high 16 bits of the capabilities word
    cpus = decoder->thread_cntrl.capabilities >> 16;
    assert(cpus > 0 && cpus <= _MAX_CPUS);
    // Decode to half resolution?
    if (resolution == DECODED_RESOLUTION_HALF)
    {
        // Reduce the frame size by half in each dimension
        width = width/2;
        height = height/2;
    }
    else if (resolution == DECODED_RESOLUTION_QUARTER)
    {
        // Reduce the frame size by one fourth in each dimension
        width = width/4;
        height = height/4;
    }
    // Initialize the codebooks
#if _ALLOCATOR
    if (!InitCodebooks(decoder->allocator, codesets)) {
        //decoder->error = CODEC_ERROR_INIT_CODEBOOKS;
        // The subroutine has already set the error code
        return false;
    }
#else
    if (!InitCodebooks(codesets)) {
        //decoder->error = CODEC_ERROR_INIT_CODEBOOKS;
        // The subroutine has already set the error code
        return false;
    }
#endif
    // Initialize the FSM
    InitDecoderFSM(decoder, &codesets[0]);
    // Check the frame dimensions and format
    //assert(width > 0);
    //assert(height > 0);
    //	assert(IsValidFormat(format));
#if _THREADED_DECODER
    // Create a semaphore to signal the transform thread to begin processing
    // Initialize the transform queue
    decoder->transform_queue.started = 0;
    decoder->transform_queue.num_entries = 0;
    decoder->transform_queue.next_entry = 0;
    decoder->transform_queue.free_entry = 0;
    memset(decoder->transform_queue.queue, 0, sizeof(decoder->transform_queue.queue));
#endif
#if _INTERLACED_WORKER_THREADS && _DELAY_THREAD_START==0
    StartInterlaceWorkerThreads(decoder);
#endif
#if _THREADED
#if !_DELAY_THREAD_START	//start threads now if not _DELAY_THREAD_START
    if(cpus > 1)
    {
        // The entropy pool is capped at four threads
        int threads = cpus;
        if(threads > 4)
            threads = 4;
        CreateLock(&decoder->entropy_worker_new.lock);
        // Initialize the pool of transform worker threads
        ThreadPoolCreate(&decoder->entropy_worker_new.pool,
                         threads,
                         EntropyWorkerThreadProc,
                         decoder);
    }
    // Initialize the lock that controls access to the generic worker thread data
    CreateLock(&decoder->worker_thread.lock);
    // Initialize the pool of transform worker threads
    ThreadPoolCreate(&decoder->worker_thread.pool,
                     cpus,
                     WorkerThreadProc,
                     decoder);
#endif
#endif
    // Set the frame dimensions and format
    SetDecoderFormat(decoder, width, height, format, resolution);
    // Allocate the data structure for decoding the samples
    AllocDecoderGroup(decoder);
    // Note that this code assumes that the samples to decode are groups
    // as opposed to isolated frames which are not supported in this code
    // Allocate a buffer for storing intermediate results during decoding
    if (!AllocDecoderBuffer(decoder, width, height, format)) {
        return false;
    }
    // Should check that the finite state machine tables were initialized
    // NOTE(review): the "flags < 0" test looks like a sentinel check for an
    // initialized table -- confirm the sentinel value against the FSM code.
    assert(decoder->fsm[0].table.flags < 0);
    // Initialize the finite state machine for this decoder
    for(i=0; i<CODEC_NUM_CODESETS; i++)
    {
        InitFSM(&decoder->fsm[i], codesets[i].fsm_table);
#if _COMPANDING
        // Scale the values in the finite state machine entries for companding
        ScaleFSM(&decoder->fsm[i].table);
#endif
    }
    // Indicate that the decoder has been initialized
    decoder->state = DECODER_STATE_INITIALIZED;
#if (1 && DUMP)
    // Write the wavelet bands as images
    SetDumpDirectory(CODEC_TYPE(decoder), DUMP_DECODER_DIRECTORY);
    SetDumpFilename(CODEC_TYPE(decoder), DUMP_DEFAULT_FILENAME);
    SetDumpChannelMask(CODEC_TYPE(decoder), 1/*ULONG_MAX*/);
    //	SetDumpWaveletMask(CODEC_TYPE(decoder), 7<<4 | 1/*ULONG_MAX*/);
    SetDumpWaveletMask(CODEC_TYPE(decoder), ULONG_MAX);
    // Set this flag to enable output
    decoder->dump.enabled = true;
#endif
#if _TIMING
    // Initialize the global timers and counters
    InitTiming();
#endif
    //DAN20160203 Fix for a memory leak in InitCookbooks
    for (i = 0; i < CODEC_NUM_CODESETS; i++)
    {
#if _ALLOCATOR
        Free(allocator, codesets[i].codebook_runbook);	codesets[i].codebook_runbook = NULL;
        Free(allocator, codesets[i].fastbook);	codesets[i].fastbook = NULL;
        Free(allocator, codesets[i].valuebook);	codesets[i].valuebook = NULL;
#else
        MEMORY_FREE(codesets[i].codebook_runbook);	codesets[i].codebook_runbook = NULL;
        MEMORY_FREE(codesets[i].fastbook);	codesets[i].fastbook = NULL;
        MEMORY_FREE(codesets[i].valuebook);	codesets[i].valuebook = NULL;
#endif
    }
    // The decoder has been initialized successfully
    return true;
}
// Prepare the entropy decoding stage: determine the usable CPU count
// (honoring any caller-imposed limit) and, in delayed-start builds, spin up
// the entropy worker thread pool on first use.
void DecodeEntropyInit(DECODER *decoder)
{
    int cpus;
    if(decoder->thread_cntrl.capabilities == 0)
    {
        // Determine the processor capabilities
        SetDecoderCapabilities(decoder);
    }
    // The CPU count lives in the high 16 bits of the capabilities word
    cpus = decoder->thread_cntrl.capabilities >> 16;
    {
        int limit = (int)decoder->cfhddata.cpu_limit;
        if (limit != 0 && cpus > limit)
        {
            // Clamp to the configured CPU limit and record the new settings
            cpus = limit;
            decoder->thread_cntrl.limit = cpus;
            decoder->thread_cntrl.set_thread_params = 1;
            decoder->thread_cntrl.capabilities &= 0xffff;
            decoder->thread_cntrl.capabilities |= cpus<<16;
        }
    }
    assert(cpus > 0 && cpus <= _MAX_CPUS);
#if _THREADED
#if _DELAY_THREAD_START //start threads now if not _DELAY_THREAD_START
    if(cpus > 1 && decoder->entropy_worker_new.pool.thread_count == 0)
    {
        // The entropy pool is capped at four threads
        int threads = (cpus > 4) ? 4 : cpus;
        CreateLock(&decoder->entropy_worker_new.lock);
        // Initialize the pool of transform worker threads
        ThreadPoolCreate(&decoder->entropy_worker_new.pool,
                         threads,
                         EntropyWorkerThreadProc,
                         decoder);
    }
#endif
#endif
}
// Install (or clear) a block of metadata override data on the decoder.
//
// Any previously installed override block is released first.  A nonzero
// overrideSize installs a private copy of overrideData; a zero size clears
// the override databases instead.  Always returns true.
bool DecodeOverrides(DECODER *decoder, unsigned char *overrideData, int overrideSize)
{
    // Release the override block from any previous call
    if(decoder->overrideData != NULL)
    {
#if _ALLOCATOR
        Free(decoder->allocator, decoder->overrideData);
#else
        MEMORY_FREE(decoder->overrideData);
#endif
        decoder->overrideData = NULL;
        decoder->overrideSize = 0;
    }
    if(overrideSize == 0)
    {
        int i;
        // Clear only the override-priority databases.
        // This was 0 to max but that cause right eye primary corrections(side-by-side) mode to flicker.
        // This database cleariing was added but I don't know why.
        for(i=METADATA_PRIORITY_OVERRIDE; i<=METADATA_PRIORITY_MAX; i++)
        {
            if(decoder->DataBases[i] == NULL)
                continue;
#if _ALLOCATOR
            Free(decoder->allocator, decoder->DataBases[i]);
#else
            MEMORY_FREE(decoder->DataBases[i]);
#endif
            decoder->DataBases[i] = NULL;
            decoder->DataBasesSize[i] = 0;
            decoder->DataBasesAllocSize[i] = 0;
        }
        return true;
    }
    // Install a private copy of the override data
#if _ALLOCATOR
    decoder->overrideData = Alloc(decoder->allocator, overrideSize);
#else
    decoder->overrideData = MEMORY_ALLOC(overrideSize);
#endif
    if(decoder->overrideData != NULL)
    {
        memcpy(decoder->overrideData, overrideData, overrideSize);
        decoder->overrideSize = overrideSize;
    }
    return true;
}
// Return the transform for the given channel of the group, allocating a
// zero-initialized TRANSFORM on first use.  Returns NULL if the channel is
// out of range or the allocation fails.  Channel zero is always accepted
// because the group header may not have been decoded yet.
TRANSFORM *AllocGroupTransform(GROUP *group, int channel)
{
#if _ALLOCATOR
    //TODO:ALLOC Change this routine to take an allocator as the first argument
    ALLOCATOR *allocator = NULL;
#endif
    TRANSFORM *transform;
    if (channel != 0)
    {
        // Make sure that the channel number is in range
        bool in_range = (0 <= channel && channel < group->header.num_channels);
        assert(in_range);
        if (!in_range)
            return NULL;
    }
    transform = group->transform[channel];
    // Reuse the existing transform when one has already been allocated
    if (transform != NULL)
        return transform;
#if _ALLOCATOR
    transform = (TRANSFORM *)Alloc(allocator, sizeof(TRANSFORM));
#else
    transform = (TRANSFORM *)MEMORY_ALLOC(sizeof(TRANSFORM));
#endif
    assert(transform != NULL);
    if (transform == NULL)
        return NULL;
    memset(transform, 0, sizeof(TRANSFORM));
    group->transform[channel] = transform;
#if _TIMING
    alloc_transform_count++;
#endif
    return transform;
}
//extern FILE *logfile;
// Fill the output buffer with black in the specified pixel format.
//
// @param buffer  Destination frame buffer
// @param width   Frame width in pixels (not used; size comes from the pitch)
// @param height  Number of rows to fill
// @param pitch   Bytes per row (assumed positive)
// @param format  Decoded pixel format that selects the black fill pattern
void EraseOutputBuffer(uint8_t *buffer, int width, int height, int32_t pitch, int format)
{
    size_t size = height * pitch;
    union {
        uint8_t byte[4];
        uint32_t word;
    } output;
    switch (format)
    {
    case DECODED_FORMAT_YUYV:
        output.byte[0] = COLOR_LUMA_BLACK;
        output.byte[1] = COLOR_CHROMA_ZERO;
        output.byte[2] = COLOR_LUMA_BLACK;
        output.byte[3] = COLOR_CHROMA_ZERO;
        break;
    default:
        //if (logfile) fprintf(logfile,"**Unknown format: %d\n", format);
        //assert(0);
        output.word = 0;
        break;
    }
    // BUG FIX: the original memset(buffer, output.word, size) truncated the
    // fill value to a single byte (memset converts its value argument to
    // unsigned char), so the chroma bytes of the YUYV pattern were never
    // written.  Replicate the full four-byte pattern instead.
    if (output.byte[0] == output.byte[1] &&
        output.byte[0] == output.byte[2] &&
        output.byte[0] == output.byte[3])
    {
        // Uniform pattern (including the default zero fill) -- memset is correct
        memset(buffer, output.byte[0], size);
    }
    else
    {
        size_t i = 0;
        // Write the 32-bit pattern a word at a time
        for (; i + sizeof(output.word) <= size; i += sizeof(output.word)) {
            memcpy(&buffer[i], &output.word, sizeof(output.word));
        }
        // Fill any trailing bytes with the partial pattern
        for (; i < size; i++) {
            buffer[i] = output.byte[i & 3];
        }
    }
}
//
// Forward declarations for the sample decoding and reconstruction routines
// implemented later in this file.
//
// Decode the coefficients in a subband
bool DecodeSampleSubband(DECODER *decoder, BITSTREAM *input, int subband);
// Decode the coefficients in a lowpass band
bool DecodeSampleLowPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet);
// Decode the coefficients in a highpass band
bool DecodeSampleHighPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band, int threading);
// Decode an empty band
bool DecodeSampleEmptyBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band);
// Decode a band of 16-bit signed coefficients
bool DecodeBand16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
                   int band_index, int width, int height);
// Decode a band of 16-bit signed coefficients without loss
bool DecodeBand16sLossless(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
                           int band_index, int width, int height);
// Decode a sample channel header
bool DecodeSampleChannelHeader(DECODER *decoder, BITSTREAM *input);
// Apply the inverse horizontal-temporal transform to reconstruct the output frame
void ReconstructSampleFrameToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
#if 0
// Reconstruct the frame to quarter resolution at full frame rate
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
                             uint8_t *frame1, uint8_t *frame2, int output_pitch,
                             FRAME_INFO *info, char *buffer, size_t buffer_size);
#else
// Reconstruct the frame to quarter resolution at full frame rate
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
                             int frame_index, uint8_t *output, int output_pitch,
                             FRAME_INFO *info, const SCRATCH *scratch, int precision);
#endif
// Copy the quarter resolution lowpass channels from the spatial transform
void CopyQuarterFrameToBuffer(TRANSFORM **transform_array, int num_channels,
                              uint8_t *output, int output_pitch,
                              FRAME_INFO *info, int precision);
// Convert the quarter resolution lowpass channels to the specified output format
void ConvertQuarterFrameToBuffer(DECODER *decoder, TRANSFORM **transform_array, int num_channels,
                                 uint8_t *output, int output_pitch,
                                 FRAME_INFO *info, int precision);
// Routines for converting the new encoded formats to the requested output format
CODEC_ERROR ReconstructSampleFrameRGB444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameRGBA4444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameYUVA4444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
// The first Bayer routine calls the other Bayer routines for the decoded resolution
CODEC_ERROR ReconstructSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameDeBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerHalfToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerQuarterToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameYUVToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameRGBToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
// New code for handling the original YUV 4:2:2 encoded format
CODEC_ERROR ReconstructSampleFrameYUV422ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
// Return true if the rest of the current channel does not have to be decoded
// at the requested output resolution.  The decision is based on which
// subbands have already been decoded (codec->decoded_subband_flags).
static bool CanSkipChannel(DECODER *decoder, int resolution)
{
    CODEC_STATE *codec = &decoder->codec;
    int channel = codec->channel;
    TRANSFORM *transform = decoder->transform[channel];
    int transform_type = transform->type;
    uint32_t half_mask;
    uint32_t quarter_mask;

    // The set of subbands needed for half/quarter resolution depends on the
    // transform type; pick the masks first so one switch handles both types.
    if (transform_type == TRANSFORM_TYPE_FIELDPLUS)
    {
        half_mask = DECODED_SUBBAND_MASK_HALF;
        quarter_mask = DECODED_SUBBAND_MASK_QUARTER;
    }
    else
    {
        assert(transform_type == TRANSFORM_TYPE_SPATIAL);
        half_mask = 0x7F;
        quarter_mask = 0x0F;
    }

    switch (resolution)
    {
    case DECODED_RESOLUTION_HALF:
        if (codec->encoded_format != ENCODED_FORMAT_BAYER)
            return ((codec->decoded_subband_flags & half_mask) == half_mask);
        break;

    case DECODED_RESOLUTION_QUARTER:
        if (codec->encoded_format != ENCODED_FORMAT_BAYER)
            return ((codec->decoded_subband_flags & quarter_mask) == quarter_mask);
        break;

    case DECODED_RESOLUTION_LOWPASS_ONLY:
        // Only the lowpass band (subband zero) is needed
        return (codec->decoded_subband_flags & 1);

    default:
        // For a YUV output the fourth (alpha) channel is never needed
        if (codec->encoded_format != ENCODED_FORMAT_BAYER &&
            (decoder->frame.format == DECODED_FORMAT_YUYV ||
             decoder->frame.format == DECODED_FORMAT_UYVY) &&
            channel == 3)
        {
            return true;
        }
        break;
    }

    // Cannot skip the rest of the channel
    return false;
}
#if 0
// Disabled code kept for reference: per-subband skip test.
// Returns true if the given subband is not required at the decoded
// resolution (only quarter resolution is handled; all other resolutions
// conservatively decode every subband).
static bool CanSkipSubband(DECODER *decoder, int subband)
{
    // Bitmask indicates which subbands must be decoded for quarter resolution
    static uint32_t quarter_resolution_mask = 0x008F;

    // Convert the subband number into a bitmask (could use a lookup table)
    uint32_t subband_mask = SUBBAND_MASK(subband);

    // Select the resolution of the fully decoded frames
    int resolution = decoder->frame.resolution;

    switch (resolution)
    {
    case DECODED_RESOLUTION_QUARTER:
        //if (4 <= subband && subband <= 6)
        if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
        {
            // Subband not in the quarter-resolution set => skippable
            if ((subband_mask & quarter_resolution_mask) == 0) {
                return true;
            }
        }
        break;

    default:
        // Assume that the subband must be decoded
        break;
    }

    return false;
}
#endif
// A wavelet can be used only when it exists and every band is marked valid
static bool AllBandsValid(IMAGE *wavelet)
{
    if (wavelet == NULL) {
        return false;
    }
    return BANDS_ALL_VALID(wavelet);
}
#if DEBUG
// Debug helper: return true when every band of the given wavelet is valid
// in every channel of the transform array.  Argument range violations are
// reported with assert and treated as "not valid".
static bool AllTransformBandsValid(TRANSFORM *transform_array[], int num_channels, int frame_index)
{
    int ch;

    // Validate the channel count and frame index
    if (num_channels < 1 || num_channels > TRANSFORM_MAX_CHANNELS) {
        assert(0);
        return false;
    }
    if (frame_index < 0 || frame_index >= TRANSFORM_MAX_FRAMES) {
        assert(0);
        return false;
    }

    // Fail as soon as any channel has an invalid band
    for (ch = 0; ch < num_channels; ch++)
    {
        if (!AllBandsValid(transform_array[ch]->wavelet[frame_index])) {
            return false;
        }
    }

    // All wavelet bands in all channels are valid
    return true;
}
// Debug helper: return true when the lowpass band (band zero) of the given
// wavelet is valid in every channel of the transform array.
static bool AllLowpassBandsValid(TRANSFORM *transform_array[], int num_channels, int frame_index)
{
    int ch;

    // Reject out-of-range arguments
    if (num_channels <= 0 || num_channels > TRANSFORM_MAX_CHANNELS) {
        return false;
    }
    if (frame_index < 0 || frame_index >= TRANSFORM_MAX_FRAMES) {
        return false;
    }

    for (ch = 0; ch < num_channels; ch++)
    {
        IMAGE *wavelet = transform_array[ch]->wavelet[frame_index];
        bool lowpass_valid = (wavelet != NULL) &&
            ((wavelet->band_valid_flags & BAND_VALID_MASK(0)) != 0);
        if (!lowpass_valid) {
            return false;
        }
    }

    // All lowpass bands in all channels are valid
    return true;
}
#endif
// Compute the encoded frame dimensions from the dimensions of the smallest
// (first) wavelet.  Older bitstreams did not carry explicit frame dimension
// tags, so the frame size must be reconstructed from the lowpass band.
// Returns false (outputs untouched) for an unknown transform type.
static bool
ComputeFrameDimensionsFromFirstWavelet(int transform_type,
                                       int first_wavelet_width,
                                       int first_wavelet_height,
                                       int *frame_width_out,
                                       int *frame_height_out)
{
    // Both supported transforms expand the first wavelet by the same factor
    const int expansion = 8;

    switch (transform_type)
    {
    case TRANSFORM_TYPE_SPATIAL:
    case TRANSFORM_TYPE_FIELDPLUS:
        *frame_width_out = first_wavelet_width * expansion;
        *frame_height_out = first_wavelet_height * expansion;
        return true;

    default:
        assert(0);
        return false;
    }
}
// Decode the sample header to determine the type of sample and other parameters.
//
// input:  bitstream positioned at the start of a sample
// header: output header.  On entry the caller may pass request flags in
//         header->find_lowpass_bands (bit 0: locate lowpass/thumbnail bands,
//         bit 1: uncompressed search, bit 2: header info only) and the current
//         stereo channel in header->videoChannels; both are read before the
//         header is cleared.
//
// Returns true when the header was parsed completely; on failure the reason
// is left in header->error.
bool ParseSampleHeader(BITSTREAM *input, SAMPLE_HEADER *header)
{
    TAGVALUE segment;
    int sample_type;
    int sample_size = 0;

    // Group index
    uint32_t channel_size[TRANSFORM_MAX_CHANNELS];

    // Number of channels in the group index.  Initialized to zero so the
    // reads below are defined even when no CODEC_TAG_INDEX tag is present
    // (the original code left this uninitialized).
    int channel_count = 0;

    // Values used for computing the frame width and height (if necessary)
    int transform_type = -1;
    int first_wavelet_width = 0;
    int first_wavelet_height = 0;
    int display_height = 0;

    int current_channel = 0;
    int currentVideoChannel;
    int find_lowpass_bands;
    int find_uncompressed;
    int find_header_info_only;

    // Validate the header pointer before reading any of its fields
    // (the original code dereferenced the header before this check)
    if (header == NULL) {
        return false;
    }

    // Capture the caller-supplied request flags before the header is cleared
    currentVideoChannel = header->videoChannels;
    find_lowpass_bands = header->find_lowpass_bands & 1;
    find_uncompressed = header->find_lowpass_bands & 2 ? 1 : 0;
    find_header_info_only = header->find_lowpass_bands & 4 ? 1 : 0;

    if (currentVideoChannel == 0)
        currentVideoChannel = 1;

    // Clear the entire sample header to prevent early return from this routine
    memset(header, 0, sizeof(SAMPLE_HEADER));

    // Clear the error code
    header->error = CODEC_ERROR_OKAY;

    // Initialize the frame dimensions to unknown
    header->width = 0;
    header->height = 0;
    header->videoChannels = 1;

    // Initialize the original pixel format to unknown
    header->input_format = COLOR_FORMAT_UNKNOWN;

    // Initialize the encoded format to unknown
    header->encoded_format = ENCODED_FORMAT_UNKNOWN;

    // Clear the frame number in case it is not present in the sample
    header->frame_number = 0;

    // The video is not progressive if the sample flags are not present
    header->hdr_progressive = false;

#if _BITSTREAM_UNALIGNED
    // Record the alignment of the bitstream within the sample
    SetBitstreamAlignment(input, 0);
#endif

    sample_size = input->nWordsUsed;

    // Get the type of sample (should be the first tag value pair)
    segment = GetTagValue(input);
    assert(segment.tuple.tag == CODEC_TAG_SAMPLE);
    if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE)) {
        header->error = CodecErrorBitstream(input);
        return false;
    }
    sample_type = segment.tuple.value;

    switch (sample_type)
    {
    case SAMPLE_TYPE_GROUP:     // Group of frames
        header->key_frame = true;
        header->difference_frame = false;
        header->droppable_frame = false;
        break;

    case SAMPLE_TYPE_FRAME:     // The second or later frame in a group
        header->key_frame = false;
        header->difference_frame = true;
        header->droppable_frame = true;
        break;

    case SAMPLE_TYPE_IFRAME:    // One frame in the group
        header->key_frame = true;
        header->difference_frame = false;
        header->droppable_frame = true;
        break;

    case SAMPLE_TYPE_SEQUENCE_HEADER:
        // Treat the video sequence header like a keyframe that can be dropped
        header->key_frame = true;
        header->difference_frame = false;
        header->droppable_frame = true;
        break;

    default:
        // Unknown type of sample
        header->error = CODEC_ERROR_SAMPLE_TYPE;
        return false;
    }

    // Continue parsing the sample header until all of the information has been found
    while ( (find_lowpass_bands == 1 && current_channel < 3) || //parse all
            (find_uncompressed == 1 && current_channel < 1) ||
            display_height == 0 ||
            header->width == 0 ||
            header->height == 0 ||
            header->input_format == COLOR_FORMAT_UNKNOWN ||
            header->frame_number == 0 ||
            (header->interlaced_flags == 0 && header->hdr_progressive == 0))
    {
        int chunksize = 0;

        // Get the next tag value pair from the bitstream
        segment = GetSegment(input);

        // Did the bitstream end before the last tag was found?
        if (input->error == BITSTREAM_ERROR_UNDERFLOW) {
            break;
        }

        // Did an error occur while reading the bitstream?
        if (input->error != BITSTREAM_ERROR_OKAY) {
            header->error = CodecErrorBitstream(input);
            return false;
        }

        // Is this an optional tag?
        if (segment.tuple.tag < 0) {
            segment.tuple.tag = NEG(segment.tuple.tag);
        }

        // Sized chunks: tag bit 0x2000 carries a 24-bit size (8 bits in the
        // tag, 16 bits in the value); tag bit 0x4000 carries a 16-bit size.
        if (segment.tuple.tag & 0x2000)
        {
            chunksize = segment.tuple.value;
            chunksize &= 0xffff;
            chunksize += ((segment.tuple.tag & 0xff) << 16);
        }
        else if (segment.tuple.tag & 0x4000)
        {
            chunksize = segment.tuple.value;
            chunksize &= 0xffff;
        }
        else
        {
            chunksize = 0;
        }

        if ((int)(segment.tuple.tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || segment.tuple.tag & 0x6000)
        {
            int skip = 1;

            if ((segment.tuple.tag & 0xff00) == 0x2200)     //sample size
            {
                if (sample_size < chunksize*4)
                    find_header_info_only = 1;
                skip = find_header_info_only;

                // For a stereo (two channel) sample, locate the second eye and
                // recursively parse its header for the thumbnail offsets
                if (currentVideoChannel <= 1 && header->videoChannels == 2 && !find_header_info_only)
                {
                    BITSTREAM input2;
                    SAMPLE_HEADER header2;
                    BITWORD *eye2 = (BITWORD *)(input->lpCurrentWord + chunksize*4);
                    int eye_offset = sample_size - input->nWordsUsed + chunksize*4; //approx
                    int eye_sample_size = input->nWordsUsed - eye_offset;

                    // Search for first sample of the next frame
                    while ((eye2[1] != (uint8_t)CODEC_TAG_SAMPLE || eye2[0] != 0 || eye2[2] != 0) && eye_sample_size > 0)
                    {
                        eye2 += 4;
                        chunksize++;
                        eye_offset += 4;
                        eye_sample_size -= 4;
                    }

                    // Save the offset to the right stereo sample
                    header->left_sample_size = eye_offset;

                    {
                        InitBitstreamBuffer(&input2, eye2, eye_sample_size, BITSTREAM_ACCESS_READ);
                        memset(&header2, 0, sizeof(SAMPLE_HEADER));
                        header2.find_lowpass_bands = 1;
                        currentVideoChannel++;
                        header2.videoChannels = currentVideoChannel;
                        if (ParseSampleHeader(&input2, &header2))
                        {
                            int i;
                            for (i = 0; i < 4; i++)
                            {
                                if (header2.thumbnail_channel_offsets[i])
                                    header->thumbnail_channel_offsets_2nd_Eye[i] = eye_offset + header2.thumbnail_channel_offsets[i];
                            }
                        }
                    }
                }
            }

            if ((segment.tuple.tag & 0xff00) == 0x2300)     //uncompressed sample size
            {
                header->hdr_uncompressed = 1;
                skip = 1;
                if (find_lowpass_bands != 1)
                    break;
            }

            if ((segment.tuple.tag & 0xff00) == 0x2100)     //level
            {
                if (find_lowpass_bands == 1)
                {
                    skip = 0;
                }
                else
                {
                    skip = 1; // no header data after the fix level
                    break;
                }
            }

            if (chunksize)
            {
                if (skip)
                {
                    // Skip over the sized chunk
                    input->lpCurrentWord += chunksize*4;
                    input->nWordsUsed -= chunksize*4;
                }
            }
            else
            {
                switch (segment.tuple.tag)
                {
                case CODEC_TAG_VERSION:     // Version number of the encoder used in each GOP.
                    header->encoder_version = (((segment.tuple.value >> 12) & 0xf) << 16) |
                                              (((segment.tuple.value >> 8) & 0xf) << 8) |
                                              ((segment.tuple.value) & 0xff);
                    break;

                case CODEC_TAG_INDEX:
                    // Get the number of channels in the index to skip
                    channel_count = segment.tuple.value;
                    DecodeGroupIndex(input, (uint32_t *)&channel_size[0], channel_count);
                    break;

                case CODEC_TAG_FRAME_WIDTH:
                    // Record the frame width in the sample header
                    header->width = segment.tuple.value;
                    break;

                case CODEC_TAG_FRAME_HEIGHT:
                    // Record the frame height in the sample header
                    header->height = segment.tuple.value;
                    break;

                case CODEC_TAG_FRAME_DISPLAY_HEIGHT:
                    display_height = segment.tuple.value;
                    break;

                case CODEC_TAG_LOWPASS_WIDTH:
                    // Save the width of the smallest wavelet for computing the frame dimensions
                    first_wavelet_width = segment.tuple.value;
                    break;

                case CODEC_TAG_LOWPASS_HEIGHT:
                    // Save the height of the smallest wavelet for computing the frame dimensions
                    first_wavelet_height = segment.tuple.value;
                    break;

                case CODEC_TAG_TRANSFORM_TYPE:
                    // Save the type of transform for computing the frame dimensions (if necessary)
                    transform_type = segment.tuple.value;
                    break;

                case CODEC_TAG_INPUT_FORMAT:
                    // Record the original format of the encoded frames
                    header->input_format = (COLOR_FORMAT)segment.tuple.value;
                    break;

                case CODEC_TAG_ENCODED_FORMAT:
                case CODEC_TAG_OLD_ENCODED_FORMAT:
                    // Record the encoded format (internal representation)
                    header->encoded_format = (ENCODED_FORMAT)segment.tuple.value;
                    // Three channels cannot carry alpha; downgrade to RGB 4:4:4
                    if (header->encoded_format == ENCODED_FORMAT_RGBA_4444 && channel_count == 3)
                        header->encoded_format = ENCODED_FORMAT_RGB_444;
                    break;

                case CODEC_TAG_FRAME_NUMBER:
                    // Record the frame number for debugging
                    header->frame_number = segment.tuple.value;
                    break;

                case CODEC_TAG_INTERLACED_FLAGS:
                    // Record the flags that indicate the field type
                    header->interlaced_flags = segment.tuple.value;
                    break;

                case CODEC_TAG_SAMPLE_FLAGS:
                    // The sample flags specify progressive versus interlaced decoding
                    header->hdr_progressive = !!(segment.tuple.value & SAMPLE_FLAGS_PROGRESSIVE);
                    if (header->hdr_progressive) {
                        // Clear the interlaced flags
                        header->interlaced_flags = 0;
                    }
                    break;

                case CODEC_TAG_LOWPASS_SUBBAND:
                    if (segment.tuple.value == 0) // low pass band
                    {
                        // Scan ahead for the lowpass band marker to record the
                        // thumbnail offset for this channel
                        int count = 8;
                        uint32_t *lptr = (uint32_t *)input->lpCurrentWord;
                        do
                        {
                            uint32_t longword = SwapInt32(lptr[count]);
                            unsigned short t, v;
                            t = (longword >> 16) & 0xffff;
                            v = (longword) & 0xffff;
                            if (t == CODEC_TAG_MARKER && IsLowPassBandMarker(v) && current_channel < 4)
                            {
                                header->thumbnail_channel_offsets[current_channel] = (sample_size - input->nWordsUsed) + count*4 + 4;
                                break;
                            }
                            count++;
                        } while (count < 32);
                        current_channel++;
                    }
                    break;

                case CODEC_TAG_ENCODED_CHANNELS:
                    if (header->videoChannels == 1)
                    {
                        header->videoChannels = segment.tuple.value;
                        if (header->videoChannels < 1)
                            header->videoChannels = 1;
                    }
                    break;

                case CODEC_TAG_QUALITY_L:   // Low 16 bits of the encode quality
                    header->encode_quality &= 0xffff0000;
                    header->encode_quality |= segment.tuple.value;
                    break;

                case CODEC_TAG_QUALITY_H:   // High 16 bits of the encode quality
                    header->encode_quality &= 0xffff;
                    header->encode_quality |= segment.tuple.value << 16;
                    break;
                }

                // Have the encoded frame dimensions been computed?
                if (header->width == 0 || header->height == 0)
                {
                    // Found the first wavelet in the bitstream?
                    if (transform_type >= 0 && first_wavelet_width > 0 && first_wavelet_height > 0)
                    {
                        // The group header did not contain tags for the frame dimensions
                        // prior to the release of support for RGB 4:4:4, so must attempt to
                        // compute the frame dimensions from the dimensions of the lowpass band.
                        int frame_width = 0;
                        int frame_height = 0;

                        // Use the dimensions of the first wavelet to compute the frame width and height
                        if (!ComputeFrameDimensionsFromFirstWavelet(transform_type,
                                                                    first_wavelet_width,
                                                                    first_wavelet_height,
                                                                    &frame_width,
                                                                    &frame_height)) {
                            // Could not compute the frame dimensions
                            header->error = CODEC_ERROR_FRAME_DIMENSIONS;
                            return false;
                        }

                        // Save the frame dimensions in the sample header
                        header->width = frame_width;
                        header->height = frame_height;

                        // No more header information after finding the lowpass band
                        break;
                    }
                }

                if (find_lowpass_bands != 1 && find_uncompressed != 1)
                {
                    // No more header information after the first encoded band
                    if (segment.tuple.tag == CODEC_TAG_BAND_NUMBER)
                    {
                        break;
                    }

                    // No more header information after the frame index
                    if (segment.tuple.tag == CODEC_TAG_FRAME_INDEX)
                    {
                        break;
                    }

                    // No more header information after the lowpass band header
                    if (segment.tuple.tag == CODEC_TAG_PIXEL_DEPTH)
                    {
                        break;
                    }
                }
            }
        }
    }

    if (header->width == 0 || header->height == 0) {
        assert(0);
    }

    // Fill in the encoded format if it was not present in the header
    if (header->encoded_format == ENCODED_FORMAT_UNKNOWN) {
        header->encoded_format = GetEncodedFormat(header->input_format, header->encode_quality, channel_count);
    }

    if (display_height > 0) {
        header->height = display_height;
    }

    if (header->encoded_format == ENCODED_FORMAT_BAYER)
    {
        // Bayer samples are encoded at half size in each dimension
        header->width *= 2;
        header->height *= 2;
        if (display_height == 0)
        {
            if (header->height == 1088)
                header->height = 1080;
        }
    }

    // Return true if the header was parsed completely and correctly.
    // It is not an error if the frame number was not found in the sample header.
    return (header->width > 0 &&
            header->height > 0 &&
            ((sample_type == SAMPLE_TYPE_FRAME) ||
             (header->input_format != COLOR_FORMAT_UNKNOWN &&
              header->encoded_format != ENCODED_FORMAT_UNKNOWN)));
}
// Print the header fields of a sample to the log file for debugging.
// Parsing stops once both lowpass band dimensions have been printed.
// Returns false on a bitstream error or when no log file was supplied.
bool DumpSampleHeader(BITSTREAM *input, FILE *logfile)
{
    TAGVALUE segment;
    int lowpass_width = 0;
    int lowpass_height = 0;

    // Nothing can be dumped without a log file
    if (logfile == NULL) {
        return false;
    }

    // Parse the sample header until both lowpass dimensions are found.
    // The original condition used && which exited the loop as soon as the
    // lowpass width was seen, so the lowpass height was never printed.
    while (lowpass_width == 0 || lowpass_height == 0)
    {
        // Get the next tag value pair from the bitstream
        segment = GetSegment(input);

        // Did an error occur while reading the bitstream?
        if (input->error != BITSTREAM_ERROR_OKAY) {
            return false;
        }

        // Is this an optional tag?
        if (segment.tuple.tag < 0) {
            segment.tuple.tag = NEG(segment.tuple.tag);
        }

        // Check that the tag is valid
        assert(CODEC_TAG_ZERO < segment.tuple.tag && segment.tuple.tag <= CODEC_TAG_LAST_NON_SIZED);

        switch (segment.tuple.tag)
        {
        case CODEC_TAG_SAMPLE:
            fprintf(logfile, "Sample type: %d\n", segment.tuple.value);
            break;

        case CODEC_TAG_FRAME_WIDTH:
            fprintf(logfile, "Frame width: %d\n", segment.tuple.value);
            break;

        case CODEC_TAG_FRAME_HEIGHT:
            fprintf(logfile, "Frame height: %d\n", segment.tuple.value);
            break;

        case CODEC_TAG_LOWPASS_WIDTH:
            lowpass_width = segment.tuple.value;
            fprintf(logfile, "Lowpass width: %d\n", lowpass_width);
            break;

        case CODEC_TAG_LOWPASS_HEIGHT:
            lowpass_height = segment.tuple.value;
            fprintf(logfile, "Lowpass height: %d\n", lowpass_height);
            break;

        case CODEC_TAG_TRANSFORM_TYPE:
            fprintf(logfile, "Transform type: %d\n", segment.tuple.value);
            break;

        case CODEC_TAG_INPUT_FORMAT:
            fprintf(logfile, "Input format: %d\n", segment.tuple.value);
            break;

        case CODEC_TAG_ENCODED_FORMAT:
        case CODEC_TAG_OLD_ENCODED_FORMAT:
            fprintf(logfile, "Encoded format: %d\n", segment.tuple.value);
            break;

        case CODEC_TAG_FRAME_NUMBER:
            fprintf(logfile, "Frame number: %d\n", segment.tuple.value);
            break;
        }
    }

    return true;
}
// Advance the bitstream past the current video channel of a stereo (3D)
// sample so decoding can continue at the next channel.  Returns the number
// of encoded channels found (or decoder->real_channels for tiny P-frame
// samples that carry no channel tag).
int SkipVideoChannel(DECODER *decoder, BITSTREAM *input, int skip_to_channel) // 3D work
{
    TAGWORD tag,value=1;
    unsigned char *pos = NULL;
    int readsize = input->nWordsUsed;

    if(readsize > 4096) // only need to scan the first few tuplets
    {
        readsize = 4096;
    }
    else
    {
        //Tiny therefore P-frame, nothing to be read so:
        value=decoder->real_channels; // return the last value.
        return value;
    }

    // Locate the ENCODED_CHANNELS tuplet near the start of the sample;
    // value receives the channel count when found
    pos = GetTupletAddr(input->lpCurrentBuffer, readsize, CODEC_TAG_ENCODED_CHANNELS, &value);
    if(pos && value>1 && skip_to_channel>1)
    {
        int chunksize = 0;
        intptr_t offset;
        int count = 0;

        // Read big-endian 16-bit tag/value pairs until the sample-size tag
        // is found (give up after 10 tuplets)
        do
        {
            tag = *pos++<<8;
            tag |= *pos++;
            value = *pos++<<8;
            value |= *pos++;
            // NOTE(review): this negation only triggers if TAGWORD is a
            // signed 16-bit type so that 0x8000+ tags read as negative —
            // confirm the TAGWORD typedef
            if (tag < 0)
            {
                tag = NEG(tag);
            }
        } while((tag & 0xff00) != CODEC_TAG_SAMPLE_SIZE && count++ < 10);

        if((tag & 0xff00) == CODEC_TAG_SAMPLE_SIZE)
        {
            // 24-bit chunk size: low 8 bits of the tag are the high bits
            chunksize = value;
            chunksize &= 0xffff;
            chunksize += ((tag&0xff)<<16);

            // Skip from the current read position past the sized chunk
            offset = ((intptr_t)pos - (intptr_t)input->lpCurrentWord) + chunksize*4;
            input->lpCurrentWord += offset;
            input->nWordsUsed -= (int)offset;
            {
                // NOTE: this local shadows the outer TAGWORD tag variable
                uint8_t *tag = (uint8_t *)input->lpCurrentWord;
                // Search for first sample of the next frame
                while((tag[1] != (uint8_t)CODEC_TAG_SAMPLE || tag[0] != 0 || tag[2] != 0) && input->nWordsUsed > 0)
                {
                    input->lpCurrentWord += 4;
                    input->nWordsUsed -= 4;
                    tag += 4;
                }
            }
        }
    }
    //if(value == 0) value = 1; // old non-stereo file
    return value;
}
// Number of subpixel phases per whole pixel for the 4-tap interpolator below
#define SUBPIXEL 64

// Four-tap subpixel interpolation kernel with SUBPIXEL+1 phase entries.
// Each row holds the four tap weights for one 1/64th-pixel phase, stored as
// signed 16-bit values scaled by 128.  The center tap of the first, second,
// next-to-last, and last rows uses 0x7fff because the exact value
// (256*128 = 32768) does not fit in a signed short.
static short gains[SUBPIXEL+1][4] = {
    {0*128,0*128,0x7fff,0*128},
    {0*128,2*128,0x7fff,-2*128},
    {0*128,5*128,255*128,-4*128},
    {0*128,8*128,254*128,-6*128},
    {0*128,11*128,253*128,-8*128},
    {0*128,14*128,252*128,-10*128},
    {0*128,18*128,250*128,-12*128},
    {0*128,21*128,248*128,-13*128},
    {-1*128,25*128,247*128,-15*128},
    {-1*128,29*128,244*128,-16*128},
    {-1*128,33*128,241*128,-17*128},
    {-2*128,37*128,239*128,-18*128},
    {-2*128,41*128,236*128,-19*128},
    {-3*128,46*128,233*128,-20*128},
    {-3*128,50*128,229*128,-20*128},
    {-4*128,55*128,226*128,-21*128},
    {-4*128,60*128,221*128,-21*128},
    {-5*128,65*128,217*128,-21*128},
    {-5*128,70*128,213*128,-22*128},
    {-6*128,75*128,209*128,-22*128},
    {-7*128,80*128,205*128,-22*128},
    {-7*128,85*128,199*128,-21*128},
    {-8*128,91*128,194*128,-21*128},
    {-9*128,96*128,190*128,-21*128},
    {-10*128,102*128,185*128,-21*128},
    {-10*128,107*128,179*128,-20*128},
    {-11*128,113*128,174*128,-20*128},
    {-12*128,118*128,169*128,-19*128},
    {-13*128,124*128,164*128,-19*128},
    {-14*128,129*128,159*128,-18*128},
    {-14*128,135*128,152*128,-17*128},
    {-15*128,141*128,147*128,-17*128},
    {-16*128,144*128,144*128,-16*128},
    {-17*128,147*128,141*128,-15*128},
    {-17*128,152*128,135*128,-14*128},
    {-18*128,159*128,129*128,-14*128},
    {-19*128,164*128,124*128,-13*128},
    {-19*128,169*128,118*128,-12*128},
    {-20*128,174*128,113*128,-11*128},
    {-20*128,179*128,107*128,-10*128},
    {-21*128,185*128,102*128,-10*128},
    {-21*128,190*128,96*128,-9*128},
    {-21*128,194*128,91*128,-8*128},
    {-21*128,199*128,85*128,-7*128},
    {-22*128,205*128,80*128,-7*128},
    {-22*128,209*128,75*128,-6*128},
    {-22*128,213*128,70*128,-5*128},
    {-21*128,217*128,65*128,-5*128},
    {-21*128,221*128,60*128,-4*128},
    {-21*128,226*128,55*128,-4*128},
    {-20*128,229*128,50*128,-3*128},
    {-20*128,233*128,46*128,-3*128},
    {-19*128,236*128,41*128,-2*128},
    {-18*128,239*128,37*128,-2*128},
    {-17*128,241*128,33*128,-1*128},
    {-16*128,244*128,29*128,-1*128},
    {-15*128,247*128,25*128,-1*128},
    {-13*128,248*128,21*128,0*128},
    {-12*128,250*128,18*128,0*128},
    {-10*128,252*128,14*128,0*128},
    {-8*128,253*128,11*128,0*128},
    {-6*128,254*128,8*128,0*128},
    {-4*128,255*128,5*128,0*128},
    {-2*128,0x7fff,2*128,0*128},
    {0*128,0*128,0x7fff,0*128}
};
// 256-entry windowed-sinc (Lanczos) filter table in signed Q15 fixed point:
// a full filter lobe sampled at 1/64-pixel resolution.  Zero crossings fall
// at indices 0, 64, 192 and the peak (32767) is at index 128.  The table is
// intended to be mirror-symmetric about index 128.
// NOTE(review): entries 96..97 (18781, 18871) and 159..160 (18871, 18159) do
// not mirror each other and break the otherwise smooth ~715-per-step ramp —
// looks like a duplicated/typo'd value; confirm against the generator before
// changing.
static int lanczos[256] =
{
    0,
    -2,
    -8,
    -18,
    -33,
    -53,
    -77,
    -106,
    -141,
    -179,
    -223,
    -272,
    -325,
    -384,
    -447,
    -514,
    -586,
    -662,
    -742,
    -826,
    -913,
    -1004,
    -1097,
    -1193,
    -1290,
    -1389,
    -1490,
    -1591,
    -1692,
    -1792,
    -1892,
    -1990,
    -2086,
    -2179,
    -2269,
    -2355,
    -2436,
    -2511,
    -2580,
    -2643,
    -2697,
    -2744,
    -2781,
    -2809,
    -2826,
    -2832,
    -2826,
    -2808,
    -2776,
    -2730,
    -2670,
    -2594,
    -2503,
    -2395,
    -2271,
    -2129,
    -1969,
    -1790,
    -1593,
    -1377,
    -1141,
    -886,
    -611,
    -315,
    0,      // index 64: zero crossing
    336,
    692,
    1069,
    1466,
    1884,
    2321,
    2778,
    3255,
    3750,
    4265,
    4797,
    5347,
    5914,
    6498,
    7097,
    7711,
    8340,
    8982,
    9636,
    10301,
    10977,
    11663,
    12357,
    13058,
    13765,
    14477,
    15192,
    15910,
    16630,
    17349,
    18066,
    18781,
    18871,  // NOTE(review): small step from 18781 — see header comment
    19580,
    20285,
    20986,
    21678,
    22361,
    23035,
    23697,
    24348,
    24983,
    25604,
    26206,
    26790,
    27354,
    27898,
    28419,
    28915,
    29387,
    29832,
    30249,
    30638,
    30997,
    31326,
    31623,
    31886,
    32117,
    32314,
    32476,
    32603,
    32695,
    32749,
    32767, //was 32768, issue for SSE2
    32749,
    32695,
    32603,
    32476,
    32314,
    32117,
    31886,
    31623,
    31326,
    30997,
    30638,
    30249,
    29832,
    29387,
    28915,
    28419,
    27898,
    27354,
    26790,
    26206,
    25604,
    24983,
    24348,
    23697,
    23035,
    22361,
    21678,
    20986,
    20285,
    19580,
    18871,
    18159,  // NOTE(review): ascending side has 18781 here — see header comment
    18066,
    17349,
    16630,
    15910,
    15192,
    14477,
    13765,
    13058,
    12357,
    11663,
    10977,
    10301,
    9636,
    8982,
    8340,
    7711,
    7097,
    6498,
    5914,
    5347,
    4797,
    4265,
    3750,
    3255,
    2778,
    2321,
    1884,
    1466,
    1069,
    692,
    336,
    0,      // index 192: zero crossing
    -315,
    -611,
    -886,
    -1141,
    -1377,
    -1593,
    -1790,
    -1969,
    -2129,
    -2271,
    -2395,
    -2503,
    -2594,
    -2670,
    -2730,
    -2776,
    -2808,
    -2826,
    -2832,
    -2826,
    -2809,
    -2781,
    -2744,
    -2697,
    -2643,
    -2580,
    -2511,
    -2436,
    -2355,
    -2269,
    -2179,
    -2086,
    -1990,
    -1892,
    -1792,
    -1692,
    -1591,
    -1490,
    -1389,
    -1290,
    -1193,
    -1097,
    -1004,
    -913,
    -826,
    -742,
    -662,
    -586,
    -514,
    -447,
    -384,
    -325,
    -272,
    -223,
    -179,
    -141,
    -106,
    -77,
    -53,
    -33,
    -18,
    -8,
    -2,
};
// Vertically shift and zoom one frame in place using a 4-tap filter driven
// by the lanczos[] table (SSE2 inner loops).
//
// RGB48:      frame buffer used as both source and destination (in place)
// buffer:     scratch buffer that receives a copy of the needed source rows
// widthbytes: bytes per row to process
// height:     number of rows
// pitch:      bytes between rows (may be negative)
// offset:     vertical shift as a fraction of the frame height
// zoom:       vertical magnification (1.0 = none); the zoom is centered
//
// The pixel layout comes from decoder->StereoBufferFormat; 8-bit formats
// (RGB32/RGB24/YUYV) and 16-bit formats use different SSE2 loops.
void RGB48VerticalShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer,
                            int widthbytes, int height, int pitch, float offset,
                            float zoom)
{
    float yposf,ystepf;
    int x;
    //int endofSSEline = 0;
    unsigned short *scanline[4];    // the four source rows feeding one output row
    //int spitch = pitch/2;
    int neg = 0,step;               // NOTE(review): neg is set below but never used
    __m128i lA,lB,lC,lD,gA,gB,gC,gD,o128,t1;
    __m128i *lineA, *lineB, *lineC, *lineD, *outline128;

    offset = -offset;
    // NOTE(review): this assignment is dead — immediately overwritten below
    yposf = height * offset;
    // Starting source row so the zoom is centered on the middle of the frame
    yposf = (float)height*(0.5f - 1.0f/(2.0f*zoom) - offset);
    ystepf = 1.0f/zoom;             // source rows consumed per output row

    if(yposf < 0.0)
        neg = 1;
    if(pitch < 0)
        yposf -= ystepf;
/*  yposi = floor(yposf);
    remainf = yposf - (float)yposi;
    tablepos = (remainf*(float)SUBPIXEL);
    yposi = abs(yposi);

    if(yposi==0 && tablepos == 0)
        return; // no move required
*/
    // -3 , 0  best   small notch at zero?
    //

    // Bytes consumed per SSE iteration: each loop body does two 16-byte
    // stores for 16-bit formats, one packed 16-byte store for 8-bit formats
    switch(decoder->StereoBufferFormat)
    {
    case DECODED_FORMAT_RGB32:
    case DECODED_FORMAT_RGB24:
    case DECODED_FORMAT_YUYV:
        step = 16;
        break;
    case DECODED_FORMAT_W13A:
    case DECODED_FORMAT_RG64:
    case DECODED_FORMAT_WP13:
    case DECODED_FORMAT_RG48:
    default:
        step = 32;
        break;
    }

    {
        // Substitute row for out-of-range taps
        // NOTE(review): only 1024 bytes — rows wider than this would read
        // past the end of zeroline; confirm the widthbytes bound
        static char zeroline[1024] = {0};
        int y,yoffset = ((int)(yposf-2.0)),yend = ((int)(yposf+2.0+ystepf*height));
        unsigned char *src = (unsigned char *)RGB48;
        unsigned char *dst = (unsigned char *)RGB48;
        unsigned char *ptr = (unsigned char *)buffer;

        // Copy the source rows that will be read into the scratch buffer,
        // since the destination overwrites the source in place
        if(yoffset < 0) yoffset = 0;
        if(yend > height) yend = height;
        src += pitch * yoffset;
        for(y=yoffset; y<yend; y++)
        {
            memcpy(ptr, src, widthbytes);
            ptr += widthbytes;
            src += pitch;
        }

        ptr = (unsigned char *)buffer;
        for(y=0;y<height; y++)
        {
            int i,t,yp = ((int)yposf);
            // Subpixel phase into the lanczos table (1/64-row resolution)
            int rmdr = 63-((int)(yposf*64.0) & 63);
            int gains[4];   // NOTE: shadows the file-scope gains[][] table

            yp -= 1; // use -2 cause a image down shift //DAN20100225
            t = 0;

            // Select the four source rows and their filter weights;
            // t accumulates the weights so an all-zero set can be detected
            for(i=0; i<4; i++)
            {
                if(yp<0 || yp>= height) // skip 0 line as the top line was zagged
                {
                    t += gains[i] = lanczos[rmdr];
                    scanline[i] = (unsigned short *)zeroline;
                }
                else
                {
                    t += gains[i] = lanczos[rmdr];
                    scanline[i] = (unsigned short *)&ptr[widthbytes*(yp-yoffset)];
                }
                yp++;
                rmdr+=64;
            }

            if(t)
            {
                __m128i half;

                // Broadcast each tap weight across a 128-bit register
                gA = _mm_set1_epi16(gains[0]);
                gB = _mm_set1_epi16(gains[1]);
                gC = _mm_set1_epi16(gains[2]);
                gD = _mm_set1_epi16(gains[3]);

                outline128 = (__m128i *)dst;
                lineA = (__m128i *)scanline[0];
                lineB = (__m128i *)scanline[1];
                lineC = (__m128i *)scanline[2];
                lineD = (__m128i *)scanline[3];

                switch(decoder->StereoBufferFormat)
                {
                case DECODED_FORMAT_W13A:
                case DECODED_FORMAT_WP13:
                    // 13-bit signed pixels: weighted sum via mulhi, saturate
                    // to 0..0x3fff, then restore the scale with a 1-bit shift
                    for(x=0;x<widthbytes; x+=step)
                    {
                        lA = _mm_loadu_si128(lineA++);
                        lB = _mm_loadu_si128(lineB++);
                        lC = _mm_loadu_si128(lineC++);
                        lD = _mm_loadu_si128(lineD++);

                        o128 = _mm_mulhi_epi16(lA, gA);
                        t1 = _mm_mulhi_epi16(lB, gB);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lC, gC);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lD, gD);
                        o128 = _mm_adds_epi16(o128,t1);

                        // upper limit to 32767
                        o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
                        o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));

                        o128 = _mm_slli_epi16(o128,1);
                        _mm_storeu_si128(outline128++, o128);

                        lA = _mm_loadu_si128(lineA++);
                        lB = _mm_loadu_si128(lineB++);
                        lC = _mm_loadu_si128(lineC++);
                        lD = _mm_loadu_si128(lineD++);

                        o128 = _mm_mulhi_epi16(lA, gA);
                        t1 = _mm_mulhi_epi16(lB, gB);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lC, gC);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lD, gD);
                        o128 = _mm_adds_epi16(o128,t1);

                        // upper limit to 32767
                        o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
                        o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));

                        o128 = _mm_slli_epi16(o128,1);
                        _mm_storeu_si128(outline128++, o128);
                    }
                    break;

                case DECODED_FORMAT_RG64:
                case DECODED_FORMAT_RG48:
                    // 16-bit unsigned pixels: drop to 13-bit so the signed
                    // mulhi works, saturate to 0..0xfff, restore with <<4
                    for(x=0;x<widthbytes; x+=step)
                    {
                        lA = _mm_loadu_si128(lineA++);
                        lA = _mm_srli_epi16(lA,3); //13-bit unsigned
                        lB = _mm_loadu_si128(lineB++);
                        lB = _mm_srli_epi16(lB,3); //13-bit unsigned
                        lC = _mm_loadu_si128(lineC++);
                        lC = _mm_srli_epi16(lC,3); //13-bit unsigned
                        lD = _mm_loadu_si128(lineD++);
                        lD = _mm_srli_epi16(lD,3); //13-bit unsigned

                        o128 = _mm_mulhi_epi16(lA, gA);
                        t1 = _mm_mulhi_epi16(lB, gB);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lC, gC);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lD, gD);
                        o128 = _mm_adds_epi16(o128,t1);

                        o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                        o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));

                        o128 = _mm_slli_epi16(o128,4);
                        _mm_storeu_si128(outline128++, o128);

                        lA = _mm_loadu_si128(lineA++);
                        lA = _mm_srli_epi16(lA,3); //13-bit unsigned
                        lB = _mm_loadu_si128(lineB++);
                        lB = _mm_srli_epi16(lB,3); //13-bit unsigned
                        lC = _mm_loadu_si128(lineC++);
                        lC = _mm_srli_epi16(lC,3); //13-bit unsigned
                        lD = _mm_loadu_si128(lineD++);
                        lD = _mm_srli_epi16(lD,3); //13-bit unsigned

                        o128 = _mm_mulhi_epi16(lA, gA);
                        t1 = _mm_mulhi_epi16(lB, gB);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lC, gC);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lD, gD);
                        o128 = _mm_adds_epi16(o128,t1);

                        o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                        o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));

                        o128 = _mm_slli_epi16(o128,4);
                        _mm_storeu_si128(outline128++, o128);
                    }
                    break;

                case DECODED_FORMAT_RGB32:
                case DECODED_FORMAT_RGB24:
                case DECODED_FORMAT_YUYV:
                    // 8-bit pixels: widen high then low halves to 16 bits,
                    // filter each half, and pack back to bytes
                    for(x=0;x<widthbytes; x+=step)
                    {
                        lA = _mm_loadu_si128(lineA);
                        lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
                        lB = _mm_loadu_si128(lineB);
                        lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
                        lC = _mm_loadu_si128(lineC);
                        lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
                        lD = _mm_loadu_si128(lineD);
                        lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);

                        lA = _mm_srli_epi16(lA,3); //13-bit unsigned
                        lB = _mm_srli_epi16(lB,3); //13-bit unsigned
                        lC = _mm_srli_epi16(lC,3); //13-bit unsigned
                        lD = _mm_srli_epi16(lD,3); //13-bit unsigned

                        o128 = _mm_mulhi_epi16(lA, gA);
                        t1 = _mm_mulhi_epi16(lB, gB);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lC, gC);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lD, gD);
                        o128 = _mm_adds_epi16(o128,t1);

                        o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                        o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));

                        o128 = _mm_slli_epi16(o128,4);
                        half = o128;

                        lA = _mm_loadu_si128(lineA++);
                        lA = _mm_unpacklo_epi8 (_mm_setzero_si128(), lA);
                        lB = _mm_loadu_si128(lineB++);
                        lB = _mm_unpacklo_epi8 (_mm_setzero_si128(), lB);
                        lC = _mm_loadu_si128(lineC++);
                        lC = _mm_unpacklo_epi8 (_mm_setzero_si128(), lC);
                        lD = _mm_loadu_si128(lineD++);
                        lD = _mm_unpacklo_epi8 (_mm_setzero_si128(), lD);

                        lA = _mm_srli_epi16(lA,3); //13-bit unsigned
                        lB = _mm_srli_epi16(lB,3); //13-bit unsigned
                        lC = _mm_srli_epi16(lC,3); //13-bit unsigned
                        lD = _mm_srli_epi16(lD,3); //13-bit unsigned

                        o128 = _mm_mulhi_epi16(lA, gA);
                        t1 = _mm_mulhi_epi16(lB, gB);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lC, gC);
                        o128 = _mm_adds_epi16(o128,t1);
                        t1 = _mm_mulhi_epi16(lD, gD);
                        o128 = _mm_adds_epi16(o128,t1);

                        o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                        o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));

                        o128 = _mm_slli_epi16(o128,4);

                        half = _mm_srli_epi16(half,8);
                        o128 = _mm_srli_epi16(o128,8);
                        o128 = _mm_packus_epi16(o128, half);
                        _mm_storeu_si128(outline128++, o128);
                    }
                    break;
                }
            }
            else
            {
                // All tap weights are zero: output black for this row
                if(decoder->StereoBufferFormat == DECODED_FORMAT_YUYV)
                {
                    // NOTE(review): memset converts its fill value to
                    // unsigned char, so this writes 0x80 to every byte and
                    // not the intended 0x10/0x80 YUYV black pattern — confirm
                    memset(dst, 0x10801080, widthbytes);
                }
                else
                {
                    memset(dst, 0, widthbytes);
                }
            }

            yposf += ystepf;
            dst += pitch;
        }

/*      ptr = (unsigned char *)buffer;
        for(y=0;y<height; y++)
        {
            int r,g,b,yp = ((int)yposf);
            yposf += ystepf;

            if(yp<0 || yp>= height)
            {
                memset(dst, 0, widthbytes);
            }
            else
            {
                memcpy(dst, &ptr[widthbytes*yp], widthbytes);
            }
            dst += pitch;
        }*/
    }
}
/*
 * RGB48VerticalShiftZoomFine
 *
 * Vertically shifts and zooms an image in place, resampling each output row
 * from four neighboring source rows with a 4-tap windowed filter (lanczos[]
 * table) at 1/64-pixel precision.  Handles several StereoBufferFormat pixel
 * layouts: 8-bit RGB32/RGB24/YUYV and 16-bit RG48/RG64/WP13/W13A.
 *
 *   decoder    - supplies StereoBufferFormat (only field read here)
 *   RGB48      - image base pointer; read as source and written as destination
 *   buffer     - caller scratch; the band of source rows that can contribute
 *                to any output row is copied here before filtering
 *   widthbytes - bytes per row to process
 *   height     - number of rows
 *   pitch      - byte stride between rows (may be negative; see below)
 *   offset     - vertical shift as a fraction of image height (negated below)
 *   zoom       - vertical scale factor; row step is 1/zoom
 *   xx         - NOTE(review): never referenced in this function; presumably
 *                the caller offsets RGB48 per column chunk instead -- confirm.
 *
 * NOTE(review): the per-row format switch below executes a single SIMD/scalar
 * group and breaks -- there is no horizontal loop -- so only the leading
 * pixels of each row are written per call.  This looks intentional
 * (per-column invocation by the caller) but should be confirmed.
 */
void RGB48VerticalShiftZoomFine(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer,
int widthbytes, int height, int pitch, float offset,
float zoom, int xx)
{
float yposf,ystepf;
//int endofSSEline = 0;
unsigned short *scanline[4];
//int spitch = pitch/2;
int neg = 0,step;
__m128i lA,lB,lC,lD,gA,gB,gC,gD,o128,t1;
uint8_t *lineAPos, *lineBPos, *lineCPos, *lineDPos;
uint8_t *outlinePos8;
uint16_t *outlinePos16;
offset = -offset;
//yposf = height * offset;
// Starting source row (float): center the zoom window, then apply the shift.
yposf = (float)height*(0.5f - 1.0f/(2.0f*zoom) - offset);
ystepf = 1.0f/zoom;
if(yposf < 0.0)
neg = 1; // NOTE(review): 'neg' is set but never read in this function
if(pitch < 0)
yposf -= ystepf;
/* yposi = floor(yposf);
remainf = yposf - (float)yposi;
tablepos = (remainf*(float)SUBPIXEL);
yposi = abs(yposi);
if(yposi==0 && tablepos == 0)
return; // no move required
*/
// -3 , 0 best small notch at zero?
//
// Bytes consumed per pixel group for the active pixel format.
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RGB32:
step = 4;
break;
case DECODED_FORMAT_RGB24:
step = 3;
break;
case DECODED_FORMAT_YUYV:
step = 4;
break;
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_RG64:
step = 8;
break;
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_RG48:
step = 6;
break;
default:
assert(0);
break;
}
{
// All-zero row substituted for taps that fall outside the image.
// NOTE(review): only 1024 bytes -- rows wider than that would read past
// its end when used as a source scanline; confirm widthbytes <= 1024 here.
static char zeroline[1024] = {0};
// Copy the band of source rows [yoffset, yend) into 'buffer' so filtering
// reads from the scratch copy while writing back into RGB48.
int y,yoffset = ((int)(yposf-2.0)),yend = ((int)(yposf+2.0+ystepf*height));
unsigned char *src = (unsigned char *)RGB48;
unsigned char *dst = (unsigned char *)RGB48;
unsigned char *ptr = (unsigned char *)buffer;
if(yoffset < 0) yoffset = 0;
if(yend > height) yend = height;
src += pitch * yoffset;
for(y=yoffset; y<yend; y++)
{
memcpy(ptr, src, widthbytes);
ptr += widthbytes;
src += pitch;
}
ptr = (unsigned char *)buffer;
for(y=0;y<height; y++)
{
// yp: first of the four contributing source rows; rmdr indexes the
// 1/64-pel filter phase in the lanczos[] table (taps 64 apart).
int i,t,yp = ((int)yposf);
int rmdr = 63-((int)(yposf*64.0) & 63);
int gains[4];
yp -= 1; // use -2 cause a image down shift //DAN20100225
t = 0;
for(i=0; i<4; i++)
{
if(yp<0 || yp>= height) // skip 0 line as the top line was zagged
{
t += gains[i] = lanczos[rmdr];
scanline[i] = (unsigned short *)zeroline;
}
else
{
t += gains[i] = lanczos[rmdr];
scanline[i] = (unsigned short *)&ptr[widthbytes*(yp-yoffset)];
}
yp++;
rmdr+=64;
}
// t is the summed tap weight; a zero sum means nothing to blend.
if(t)
{
gA = _mm_set1_epi16(gains[0]);
gB = _mm_set1_epi16(gains[1]);
gC = _mm_set1_epi16(gains[2]);
gD = _mm_set1_epi16(gains[3]);
outlinePos8 = (uint8_t *)dst;
outlinePos16 = (uint16_t *)dst;
lineAPos = (uint8_t *)scanline[0];
lineBPos = (uint8_t *)scanline[1];
lineCPos = (uint8_t *)scanline[2];
lineDPos = (uint8_t *)scanline[3];
// Per-format: load the four rows, weight with mulhi (x*g>>16),
// accumulate with signed saturation, then use the adds_epi16 /
// subs_epu16 pair to clamp to the format's range before rescaling.
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_W13A:
lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=8;
lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=8;
lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=8;
lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=8;
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
// upper limit to 32767
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_slli_epi16(o128,1);
//_mm_storeu_si128((__m128i *)outlinePos, o128);
outlinePos16[0] = _mm_extract_epi16(o128, 0);
outlinePos16[1] = _mm_extract_epi16(o128, 1);
outlinePos16[2] = _mm_extract_epi16(o128, 2);
outlinePos16[3] = _mm_extract_epi16(o128, 3);
outlinePos16+=4;
break;
case DECODED_FORMAT_WP13:
lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=6;
lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=6;
lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=6;
lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=6;
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
// upper limit to 32767
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_slli_epi16(o128,1);
//_mm_storeu_si128((__m128i *)outlinePos, o128);
outlinePos16[0] = _mm_extract_epi16(o128, 0);
outlinePos16[1] = _mm_extract_epi16(o128, 1);
outlinePos16[2] = _mm_extract_epi16(o128, 2);
outlinePos16+=3;
break;
case DECODED_FORMAT_RG64:
lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=8;
lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=8;
lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=8;
lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=8;
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
// clamp to 0..0x0fff, then restore 16-bit scale with <<4
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_slli_epi16(o128,4);
//_mm_storeu_si128((__m128i *)outlinePos, o128);
outlinePos16[0] = _mm_extract_epi16(o128, 0);
outlinePos16[1] = _mm_extract_epi16(o128, 1);
outlinePos16[2] = _mm_extract_epi16(o128, 2);
outlinePos16[3] = _mm_extract_epi16(o128, 3);
outlinePos16+=4;
break;
case DECODED_FORMAT_RG48:
lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=6;
lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=6;
lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=6;
lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=6;
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_slli_epi16(o128,4);
//_mm_storeu_si128((__m128i *)outlinePos, o128);
outlinePos16[0] = _mm_extract_epi16(o128, 0);
outlinePos16[1] = _mm_extract_epi16(o128, 1);
outlinePos16[2] = _mm_extract_epi16(o128, 2);
outlinePos16+=3;
break;
case DECODED_FORMAT_RGB32:
case DECODED_FORMAT_YUYV:
// 8-bit data: unpackhi against zero widens bytes into the high
// halves of 16-bit lanes before weighting.
lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=4;
lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=4;
lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=4;
lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=4;
lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_srli_epi16(o128,4); // back down to 8-bit range
outlinePos8[0] = _mm_extract_epi16(o128, 0);
outlinePos8[1] = _mm_extract_epi16(o128, 1);
outlinePos8[2] = _mm_extract_epi16(o128, 2);
outlinePos8[3] = _mm_extract_epi16(o128, 3);
outlinePos8+=4;
break;
case DECODED_FORMAT_RGB24:
{
// Scalar path: SSE2 unaligned byte loads can't safely straddle
// 3-byte pixels (see disabled code below), so blend b/g/r in C.
int r,g,b;
b = ((lineAPos[0] * gains[0])>>7) +
((lineBPos[0] * gains[1])>>7) +
((lineCPos[0] * gains[2])>>7) +
((lineDPos[0] * gains[3])>>7); //16-bit
g = ((lineAPos[1] * gains[0])>>7) +
((lineBPos[1] * gains[1])>>7) +
((lineCPos[1] * gains[2])>>7) +
((lineDPos[1] * gains[3])>>7); //16-bit
r = ((lineAPos[2] * gains[0])>>7) +
((lineBPos[2] * gains[1])>>7) +
((lineCPos[2] * gains[2])>>7) +
((lineDPos[2] * gains[3])>>7); //16-bit
if(r<0) r = 0; if(r>65535) r = 65535;
if(g<0) g = 0; if(g>65535) g = 65535;
if(b<0) b = 0; if(b>65535) b = 65535;
lineAPos+=3;
lineBPos+=3;
lineCPos+=3;
lineDPos+=3;
outlinePos8[0] = b >> 8; //b
outlinePos8[1] = g >> 8; //g
outlinePos8[2] = r >> 8; //r
outlinePos8+=3;
/* SSE2 can't load byte alligned
lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=3;
lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=3;
lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=3;
lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=3;
lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_srli_epi16(o128,4);
outlinePos8[0] = _mm_extract_epi16(o128, 0); //b
outlinePos8[1] = _mm_extract_epi16(o128, 1); //g
outlinePos8[2] = _mm_extract_epi16(o128, 2); //r
outlinePos8+=3;
*/
}
break;
}
}
else
{
// No contributing taps: blank the row.
if(decoder->StereoBufferFormat == DECODED_FORMAT_YUYV)
{
// NOTE(review): memset uses only the low byte of its value (0x80
// here), so this fills with 0x80 bytes rather than the intended
// 0x10/0x80 YUYV black pattern -- likely needs a 32-bit fill loop.
memset(dst, 0x10801080, widthbytes);
}
else
{
memset(dst, 0, widthbytes);
}
}
yposf += ystepf;
dst += pitch;
}
/*ptr = (unsigned char *)buffer;
for(y=0;y<height; y++)
{
int r,g,b,yp = ((int)yposf);
yposf += ystepf;
if(yp<0 || yp>= height)
{
memset(dst, 0, widthbytes);
}
else
{
memcpy(dst, &ptr[widthbytes*yp], widthbytes);
}
dst += pitch;
}*/
}
}
/*
 * RGB48VerticalShift
 *
 * Shifts an image vertically, in place, by a fractional number of rows
 * (offset is a fraction of the image height).  Each output row is a
 * 4-tap subpixel blend of source rows, with weights taken from the
 * gains[SUBPIXEL] table; a rotating 4-scanline window in 'buffer'
 * provides the source data.  Returns early when the shift rounds to
 * exactly zero.
 *
 *   decoder    - supplies StereoBufferFormat
 *   RGB48      - image base pointer; read and written in place
 *   buffer     - caller scratch holding four (step-aligned) scanlines
 *   widthbytes - bytes per row; rounded up internally to the SSE step,
 *                with the ragged tail handled by partial memcpy stores
 *   height     - number of rows
 *   pitch      - byte stride between rows (spitch = pitch/2 in shorts)
 *   offset     - vertical shift fraction; negative offsets are handled
 *                by walking the image bottom-up ('neg' path)
 */
void RGB48VerticalShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer,
int widthbytes, int height, int pitch, float offset)
{
float yposf,remainf;
int yposi,tablepos,x,y;
int gainA,gainB,gainC,gainD;
//int endofSSEline = 0;
unsigned short *scanline[4], *tline;
int spitch = pitch/2;
int neg = 0,shift = 0,skip,step;
int origwidthbytes = widthbytes;
int origwidthextra;
__m128i lA, lB, lC, lD, gA, gB, gC, gD, o128, t1;
__m128i *lineA, *lineB, *lineC, *lineD, *outline128;
// offset = -offset;
if(offset < 0.0)
neg = 1;
// Split the shift into whole rows (yposi) and a subpixel phase (tablepos).
yposf = height * offset;
yposi = (int)floor(yposf);
remainf = yposf - (float)yposi;
tablepos = (int)(remainf*(float)SUBPIXEL);
yposi = abs(yposi);
if(yposi==0 && tablepos == 0)
return; // no move required
// -3 , 0 best small notch at zero?
//
// Tap weights come from the gains[] table; the negative direction
// reverses the tap order (and uses a different whole-row bias).
if(neg)
{
yposi -= 2;
gainA = gains[tablepos][0];
gainB = gains[tablepos][1];
gainC = gains[tablepos][2];
gainD = gains[tablepos][3];
}
else
{
yposi -= 1; //offset inherent in the table
gainD = gains[tablepos][0];
gainC = gains[tablepos][1];
gainB = gains[tablepos][2];
gainA = gains[tablepos][3];
}
gA = _mm_set1_epi16(gainA);
gB = _mm_set1_epi16(gainB);
gC = _mm_set1_epi16(gainC);
gD = _mm_set1_epi16(gainD);
// skip: shorthand for the format family (6 = 16-bit RGB triplet formats);
// step: bytes consumed per loop iteration (two SSE groups for 16-bit data).
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RGB32:
skip = 4;
step = 16;
break;
case DECODED_FORMAT_RGB24:
skip = 3;
step = 16;
break;
case DECODED_FORMAT_YUYV:
skip = 2;
step = 16;
break;
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_RG48:
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_RG64:
default:
skip = 6;
step = 32;
break;
}
// scanline[0] = buffer;
// scanline[1] = buffer + width*skip/2;
// scanline[2] = buffer + width*skip/2*2;
// scanline[3] = buffer + width*skip/2*3;
// Round widthbytes up to a whole number of SSE steps; origwidthextra is
// the ragged tail of the real row, stored via memcpy instead of a full
// 16-byte store so we never write past the row end.
widthbytes += (step - 1);
widthbytes -= (widthbytes % step);
origwidthextra = (origwidthbytes % step);
scanline[0] = buffer;
scanline[1] = buffer + widthbytes/2;
scanline[2] = buffer + widthbytes/2*2;
scanline[3] = buffer + widthbytes/2*3;
// Prime the 4-row window with the first contributing source rows
// (zero-filled where a tap falls outside the image).
for(y=0; y<4; y++)
{
if(yposi+y >=0 && yposi+y<height)
{
unsigned short *ptr = RGB48;
if(neg)
ptr += (height-1-yposi-y)*spitch;
else
ptr += (yposi+y)*spitch;
memcpy(scanline[y], ptr, origwidthbytes);
}
else
{
memset(scanline[y], 0, origwidthbytes);
}
}
{
for(y=0;y<height; y++)
{
unsigned short *ptr = RGB48;
if(neg)
ptr += (height-y-1)*spitch; // bottom-up for negative shifts
else
ptr += y*spitch;
outline128 = (__m128i *)ptr;
lineA = (__m128i *)scanline[0];
lineB = (__m128i *)scanline[1];
lineC = (__m128i *)scanline[2];
lineD = (__m128i *)scanline[3];
//for(x=0;x<width*skip/2; x+=step)
for(x=0;x<widthbytes; x+=step)
{
__m128i half;
// First SSE group: load/normalize per format; 'shift' selects the
// clamp range used after blending (0 = signed 13/14-bit data,
// 3 = data pre-shifted down 3 bits).
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_WP13:
{
lA = _mm_loadu_si128(lineA++);
lB = _mm_loadu_si128(lineB++);
lC = _mm_loadu_si128(lineC++);
lD = _mm_loadu_si128(lineD++);
shift = 0;
}
break;
case DECODED_FORMAT_RG64:
case DECODED_FORMAT_RG48:
{
lA = _mm_loadu_si128(lineA++);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_loadu_si128(lineB++);
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_loadu_si128(lineC++);
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_loadu_si128(lineD++);
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
shift = 3;
}
break;
case DECODED_FORMAT_RGB32:
case DECODED_FORMAT_RGB24:
case DECODED_FORMAT_YUYV:
// 8-bit data: first group widens the high 8 bytes (no pointer
// advance); the second group below takes the low 8 and advances.
lA = _mm_loadu_si128(lineA);
lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
lB = _mm_loadu_si128(lineB);
lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
lC = _mm_loadu_si128(lineC);
lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
lD = _mm_loadu_si128(lineD);
lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
shift = 3;
break;
}
// Blend: each tap is (value * gain) >> 16, accumulated with
// signed saturation.
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
if(shift)
{
// clamp to 0..0x0fff, restore scale with <<4
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_slli_epi16(o128,4);
}
else
{
// upper limit to 32767
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_slli_epi16(o128,1);
}
if(skip == 6) //RGB48 || WP13
{
if(widthbytes == origwidthbytes || x+16 < origwidthbytes)
_mm_storeu_si128(outline128++, o128);
else
{
//if(x < origwidthbytes+16/*bytes in an SSE2 reg*/)
// Tail store: stage in scanline[0] (its leading bytes were
// already consumed this pass) and copy only the valid bytes.
_mm_storeu_si128((__m128i *)scanline[0], o128);
memcpy((char *)outline128, (char *)scanline[0], origwidthextra);
outline128++;
}
}
else
{
half = o128; // 8-bit formats: keep high half until low half is done
}
// Second SSE group of this step (low half for 8-bit formats,
// next 8 shorts for 16-bit formats).
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_WP13:
{
lA = _mm_loadu_si128(lineA++);
lB = _mm_loadu_si128(lineB++);
lC = _mm_loadu_si128(lineC++);
lD = _mm_loadu_si128(lineD++);
shift = 0;
}
break;
case DECODED_FORMAT_RG64:
case DECODED_FORMAT_RG48:
{
lA = _mm_loadu_si128(lineA++);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_loadu_si128(lineB++);
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_loadu_si128(lineC++);
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_loadu_si128(lineD++);
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
shift = 3;
}
break;
case DECODED_FORMAT_RGB32:
case DECODED_FORMAT_RGB24:
case DECODED_FORMAT_YUYV:
lA = _mm_loadu_si128(lineA++);
lA = _mm_unpacklo_epi8 (_mm_setzero_si128(), lA);
lB = _mm_loadu_si128(lineB++);
lB = _mm_unpacklo_epi8 (_mm_setzero_si128(), lB);
lC = _mm_loadu_si128(lineC++);
lC = _mm_unpacklo_epi8 (_mm_setzero_si128(), lC);
lD = _mm_loadu_si128(lineD++);
lD = _mm_unpacklo_epi8 (_mm_setzero_si128(), lD);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
shift = 3;
break;
}
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
if(shift)
{
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_slli_epi16(o128,4);
}
else
{
// upper limit to 32767
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_slli_epi16(o128,1);
}
if(skip != 6) //!RGB48 || !WP13
{
// 8-bit formats: drop each 16-bit result back to a byte and
// repack both halves into one 16-byte store.
half = _mm_srli_epi16(half,8);
o128 = _mm_srli_epi16(o128,8);
o128 = _mm_packus_epi16(o128, half);
}
if(widthbytes == origwidthbytes || x+32 < origwidthbytes)
{
_mm_storeu_si128(outline128++, o128);
}
else
{
//if(x+16 < origwidthbytes+16)
if(origwidthextra > 16)
{
_mm_storeu_si128((__m128i *)scanline[0], o128);
memcpy((char *)outline128, (char *)scanline[0], origwidthextra - 16);
}
outline128++;
}
}
// Rotate the window: the oldest line becomes the slot for the next
// incoming source row.
tline = scanline[0];
scanline[0] = scanline[1];
scanline[1] = scanline[2];
scanline[2] = scanline[3];
scanline[3] = tline;
if(yposi+y+4 >=0 && yposi+y+4<height)
{
unsigned short *ptr = RGB48;
if(neg)
ptr += (height-1-(yposi+y+4))*spitch;
else
ptr += (yposi+y+4)*spitch;
memcpy(scanline[3], ptr, origwidthbytes);
}
else
{
memset(scanline[3], 0, origwidthbytes);
}
}
}
}
/*
 * RGB48HoriShiftZoom
 *
 * Horizontally shifts and zooms one RGB48 (or WP13) scanline in place,
 * resampling with a 4-tap lanczos[] filter at 1/64-pixel precision.
 * Supports an optional horizontal mirror, a per-eye tilt zoom, a per-row
 * rotation-style offset (roffset scaled by 'line'), and a non-uniform
 * horizontal stretch controlled by decoder->cfhddata.FrameHDynamic /
 * FrameHDynCenter / FrameHDynWidth (a "hold" region of uniform step
 * between holdstart and holdend, with blended steps on either side).
 *
 *   decoder   - supplies StereoBufferFormat and the cfhddata stretch params
 *   RGB48     - the scanline (RGB triplets of unsigned shorts); in/out
 *   buffer    - scratch; the source row is copied here before resampling
 *   width     - pixels per row
 *   height    - image height (used to scale the per-row roffset term)
 *   line      - row index of this scanline within the image
 *   hoffset   - horizontal shift as a fraction of width
 *   roffset   - rotation/skew offset applied progressively per row
 *   zoom      - horizontal scale factor (xstep = 1/zoom)
 *   flip      - nonzero: mirror the row left<->right before resampling
 *   frameTilt - per-eye zoom asymmetry; eye selects multiply vs divide
 *   eye       - which stereo eye this row belongs to (>0 = right)
 *
 * Two resample paths: WP13 (signed 13-bit, <<1 restore) and the default
 * unsigned RG48 path (>>1 pre-shift, <<2 restore).  Each has an MMX fast
 * path away from the row edges and a scalar fallback at the edges.
 */
void RGB48HoriShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, int height, int line, float hoffset, float roffset, float zoom, int flip, float frameTilt, int eye)
{
float xposf,xstepf;
int x;
//int endofSSEline = 0;
unsigned short *scanline = (unsigned short *)buffer;
short *sscanline = (short *)buffer;
int neg = 0;
float offset = hoffset;
if(flip)
{
// Mirror the row in place: swap RGB triplets from the two ends inward.
unsigned short *ptrL = RGB48;
unsigned short *ptrR = RGB48;
ptrR += (width*3) - 3;
for(x=0;x<width/2;x++)
{
int t;
t = *ptrL;
*ptrL++ = *ptrR;
*ptrR++ = t;
t = *ptrL;
*ptrL++ = *ptrR;
*ptrR++ = t;
t = *ptrL;
*ptrL++ = *ptrR;
*ptrR++ = t;
ptrR -= 6; // wrote 3 forward, so net -3 per pixel
}
}
if(eye > 0)
{
zoom *= 1.0f + frameTilt;
}
else
{
zoom /= 1.0f + frameTilt;
}
// Starting source x: center the zoom window, apply shift, then the
// rotation offset (constant part plus a per-row ramp).
xposf = (float)width*(0.5f - 1.0f/(2.0f*zoom) - offset);
xposf -= width * roffset * 0.5f / zoom;
xposf += (float)line * ((float)width* roffset / ((float)height*zoom));
if(xposf < 0.0)
neg = 1; // NOTE(review): 'neg' is set but never read in this function
xstepf = 1.0f/zoom;
// Copy the source row into scratch; resampling writes back into RGB48.
memcpy(scanline, RGB48, width*3*2);
{
//unsigned short zeroline[3] = {0};
int xx = 0;
int ixpos = (int)(xposf * 65536.0f); // 16.16 fixed-point position/step
int ixstep = (int)(xstepf * 65536.0f);
float xbase = xposf / (float)width;
float xstep = xstepf / (float)width;
float z = (decoder->cfhddata.FrameHDynamic - 1.0f)*2.0f;
// int holdstart = width*5/10; // Use to specify a area of uniform stretch
// int holdend = width*5/10;
int holdstart = (int)((decoder->cfhddata.FrameHDynCenter - decoder->cfhddata.FrameHDynWidth*0.125)*(float)width);
int holdend = (int)((decoder->cfhddata.FrameHDynCenter + decoder->cfhddata.FrameHDynWidth*0.125)*(float)width);
float flatxstep;
float modified_xstep_avg;
float bottomxstep;
float basexstepstart;
float basexstepend;
float range;
#if MMXSUPPORTED //TODO DANREMOVE
__m64 overflowprotect = _mm_set1_pi16(0x7fff-0x3fff);
#endif
// Clamp the hold region to the row, preserving its width at the edges.
if(holdstart < 0) holdstart = 0, holdend = (int)((decoder->cfhddata.FrameHDynWidth*0.5)*(float)width);
if(holdend > width) holdend = width, holdstart = (int)((1.0 - decoder->cfhddata.FrameHDynWidth*0.5)*(float)width);
range = (float)(holdend - holdstart);
// Solve for the step sizes outside the hold region so the total
// traversal still averages to xstep across the full row.
flatxstep = xstep-z*0.5f*xstep;
modified_xstep_avg = (xstep * (float)width - range * flatxstep) / ((float)width - range);
bottomxstep = modified_xstep_avg - (flatxstep - modified_xstep_avg);
if(holdstart == (width-holdend))
{
basexstepstart = bottomxstep;
basexstepend = bottomxstep;
}
else if(holdstart < (width-holdend))
{
float a = (float)holdstart / (float)(width-holdend);
float startavg = a * modified_xstep_avg + (1.0f - a) * flatxstep;
float endavg = (modified_xstep_avg * ((float)width-range) - startavg * (float)holdstart) / (float)(width-holdend);
basexstepstart = startavg - (flatxstep - startavg);
basexstepend = endavg - (flatxstep - endavg);
}
else
{
float a = (float)(width-holdend) / (float)holdstart;
float endavg = a * modified_xstep_avg + (1.0f - a) * flatxstep;
float startavg = (modified_xstep_avg * ((float)width-range) - endavg * (float)(width-holdend)) / (float)holdstart;
basexstepstart = startavg - (flatxstep - startavg);
basexstepend = endavg - (flatxstep - endavg);
}
if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13)
{
// Signed 13-bit path: reads via sscanline (short *), restores <<1.
float fxpos = xbase;
for(x=0;x<width; x++) //RGB
{
int gains = 0;
int xp, rmdr;
if(z != 0.0)
{
// Non-uniform stretch: ramp the step from basexstep* at the
// edges to flatxstep at the hold region boundaries.
if(x<holdstart)
{
fxpos += basexstepstart*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
}
else if(x>holdend)
{
int diff = width - x;
int range = width - holdend;
fxpos += basexstepend*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
}
else
{
fxpos += flatxstep;
}
xp = (int)(fxpos * 65536.0f*(float)width);
rmdr = 63-((xp>>10) & 63); // 1/64-pel filter phase
xp >>= 16;
}
else
{
// Uniform zoom: pure 16.16 fixed-point stepping.
xp = ixpos>>16;
rmdr = 63-((ixpos>>10) & 63);
ixpos += ixstep;
}
xp -= 1;// was -2 causing a right shift //DAN20100225
#if MMXSUPPORTED //TODO DANREMOVE
if(xp>4 && xp<width-4 && xx < (width-1)*3) //We need 3 values for RGB< yet we write 4, so the last pixel can't be done with MMX
{
// MMX fast path: four 4-short taps, weighted by mulhi
// (x*g>>16), summed with saturation, clamped to 14 bits
// via the adds/subs pair, then restored with <<1.
__m64 *src64;
__m64 *dst64;
__m64 sumx16;
__m64 rgbx16;
__m64 gain16;
int linepos = (xp-1)*3;
src64 = (__m64 *)&sscanline[linepos];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
src64 = (__m64 *)&sscanline[linepos+3];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&sscanline[linepos+6];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&sscanline[linepos+9];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
sumx16 = _mm_slli_pi16(sumx16, 1);
dst64 = (__m64 *)&RGB48[xx];
*dst64 = sumx16;
}
else
#endif
{
// Scalar edge fallback: out-of-range taps accumulate their
// weight into 'gains' and fold it into the next in-range tap.
int i,r=0,g=0,b=0;
for(i=0; i<4; i++)
{
if(xp<=0 || xp>= width)
{
gains += lanczos[rmdr]>>1;
}
else
{
gains += lanczos[rmdr]>>1;
r += (gains * sscanline[xp*3]);
g += (gains * sscanline[xp*3+1]);
b += (gains * sscanline[xp*3+2]);
gains = 0;
}
xp++;
rmdr+=64;
}
r >>= 14;
g >>= 14;
b >>= 14;
if(r<0) r=0; else if(r>65535) r=65535;
if(g<0) g=0; else if(g>65535) g=65535;
if(b<0) b=0; else if(b>65535) b=65535;
RGB48[xx] = r;
RGB48[xx+1] = g;
RGB48[xx+2] = b;
}
xx+=3;
}
}
else
{
// Unsigned RG48 path: reads via scanline (unsigned short *); values
// are pre-shifted >>1 to make mulhi safe, restored with <<2.
float fxpos = xbase;
for(x=0;x<width; x++) //RGB
{
int gains = 0;
int xp, rmdr;
if(z != 0.0)
{
if(x<holdstart)
{
fxpos += basexstepstart*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
}
else if(x>holdend)
{
int diff = width - x;
int range = width - holdend;
fxpos += basexstepend*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
}
else
{
fxpos += flatxstep;
}
xp = (int)(fxpos * 65536.0f*(float)width);
rmdr = 63-((xp>>10) & 63);
xp >>= 16;
}
else
{
xp = ixpos>>16;
rmdr = 63-((ixpos>>10) & 63);
ixpos += ixstep;
}
xp -= 1; // was -2 causing a right shift //DAN20100225
#if MMXSUPPORTED //TODO DANREMOVE
if(xp>4 && xp<width-4)
{
__m64 *src64;
__m64 *dst64;
__m64 sumx16;
__m64 rgbx16;
__m64 gain16;
int linepos = (xp-0)*3; //DAN20102602 -- fix left edge error.
src64 = (__m64 *)&scanline[linepos];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
src64 = (__m64 *)&scanline[linepos+3];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&scanline[linepos+6];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&scanline[linepos+9];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
sumx16 = _mm_slli_pi16(sumx16, 2);
dst64 = (__m64 *)&RGB48[xx];
*dst64 = sumx16;
}
else
#endif
{
int i,r=0,g=0,b=0;
for(i=0; i<4; i++)
{
if(xp<=0 || xp>= width)
{
gains += lanczos[rmdr]>>1;
}
else
{
gains += lanczos[rmdr]>>1;
r += (gains * scanline[xp*3]);
g += (gains * scanline[xp*3+1]);
b += (gains * scanline[xp*3+2]);
gains = 0;
}
xp++;
rmdr+=64;
}
r >>= 14;
g >>= 14;
b >>= 14;
if(r<0) r=0; else if(r>65535) r=65535;
if(g<0) g=0; else if(g>65535) g=65535;
if(b<0) b=0; else if(b>65535) b=65535;
RGB48[xx] = r;
RGB48[xx+1] = g;
RGB48[xx+2] = b;
}
xx+=3;
}
}
}
#if MMXSUPPORTED //TODO DANREMOVE
//_mm_empty();
#endif
}
#if 0 // Disabled: older copy of this routine, apparently superseded by RGB48HoriShiftZoom above; kept for reference -- confirm it is unused and delete.
void RGB48HoriShiftZoomFine(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, int height, int line, float hoffset, float roffset, float zoom, int flip, float frameTilt, int eye)
{
float xposf,remainf,xstepf;
int xposi,tablepos,x;
int Ra,Rb,Rc,Rd;
int Ga,Gb,Gc,Gd;
int Ba,Bb,Bc,Bd;
int gainA,gainB,gainC,gainD;
int endofSSEline = 0;
unsigned short *scanline = (unsigned short *)buffer;
short *sscanline = (short *)buffer;
int neg = 0,shift = 0;
float offset = hoffset;
__m128i l1,l2,l3,gA,gB,gC,gD,o128,t1,t2;
__m128i *line128, *outline128;
if(flip)
{
unsigned short *ptrL = RGB48;
unsigned short *ptrR = RGB48;
ptrR += (width*3) - 3;
for(x=0;x<width/2;x++)
{
int t;
t = *ptrL;
*ptrL++ = *ptrR;
*ptrR++ = t;
t = *ptrL;
*ptrL++ = *ptrR;
*ptrR++ = t;
t = *ptrL;
*ptrL++ = *ptrR;
*ptrR++ = t;
ptrR -= 6;
}
}
if(eye > 0)
{
zoom *= 1.0 + frameTilt;
}
else
{
zoom /= 1.0 + frameTilt;
}
xposf = (float)width*(0.5 - 1.0/(2.0*zoom) - offset);
xposf -= width * roffset * 0.5 / zoom;
xposf += (float)line * ((float)width* roffset / ((float)height*zoom));
if(xposf < 0.0)
neg = 1;
xstepf = 1.0/zoom;
memcpy(scanline, RGB48, width*3*2);
{
unsigned short zeroline[3] = {0};
int xx = 0;
int ixpos = xposf * 65536.0;
int ixstep = xstepf * 65536.0;
float xbase = xposf / (float)width;
float xstep = xstepf / (float)width;
float z = (decoder->cfhddata.FrameHDynamic - 1.0)*2.0;
int holdstart = width*5/10; // Use to specify a area of uniform stretch
int holdend = width*5/10;
float flatxstep = xstep-z*0.5*xstep;
float modified_xstep_avg = (xstep * (float)width - (float)(holdend - holdstart) * flatxstep) / (float)(width - (holdend - holdstart));
float bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg);
__m64 overflowprotect = _mm_set1_pi16(0x7fff-0x3fff);
if(bottomxstep < 0.0)
{
bottomxstep = 0.0;
flatxstep = modified_xstep_avg + modified_xstep_avg;
}
if(flatxstep < 0.0)
{
flatxstep = 0.0;
bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg);
}
if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13)
{
float fxpos = xbase;
for(x=0;x<width; x++) //RGB
{
int gains = 0;
int xp, rmdr;
if(z != 0.0)
{
if(x<holdstart)
{
fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
}
else if(x>holdend)
{
int diff = width - x;
int range = width - holdend;
fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
}
else
{
fxpos += flatxstep;
}
/* fxpos = xbase + xstep * x;//(float)ixpos/(65536.0*(float)width);
if(fxpos >= 0.0 && fxpos <= 1.0)
{
if(z > 0.0)
{
fxpos = 1.8*fxpos - 2.4*fxpos*fxpos + (1.6*fxpos*fxpos*fxpos);
fxpos = fxpos * (z) + (xbase + xstep * x) * (1.0-z);
}
else
{
fxpos = 3.0*fxpos*fxpos - 2.0*fxpos*fxpos*fxpos;
fxpos = fxpos * (-z) + (xbase + xstep * x) * (1.0+z);
}
}
*/
xp = (fxpos * 65536.0*(float)width);
rmdr = 63-((xp>>10) & 63);
xp >>= 16;
}
else
{
xp = ixpos>>16;
rmdr = 63-((ixpos>>10) & 63);
ixpos += ixstep;
}
xp -= 1;// was -2 causing a right shift //DAN20100225
if(xp>4 && xp<width-4)
{
__m64 *src64;
__m64 *dst64;
__m64 sumx16;
__m64 rgbx16;
__m64 gain16;
int linepos = (xp-1)*3;
src64 = (__m64 *)&sscanline[linepos];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
src64 = (__m64 *)&sscanline[linepos+3];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&sscanline[linepos+6];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&sscanline[linepos+9];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
sumx16 = _mm_slli_pi16(sumx16, 1);
dst64 = (__m64 *)&RGB48[xx];
*dst64 = sumx16;
}
else
{
int i,t,r=0,g=0,b=0;
for(i=0; i<4; i++)
{
if(xp<=0 || xp>= width)
{
/* if(i == 3) //DAN20101112 this code was crashing disparity zoom
{
gains = lanczos[rmdr]>>1;
r += (gains * sscanline[(xp-1)*3]);
g += (gains * sscanline[(xp-1)*3+1]);
b += (gains * sscanline[(xp-1)*3+2]);
}
else */
{
gains += lanczos[rmdr]>>1;
}
}
else
{
gains += lanczos[rmdr]>>1;
r += (gains * sscanline[xp*3]);
g += (gains * sscanline[xp*3+1]);
b += (gains * sscanline[xp*3+2]);
gains = 0;
}
xp++;
rmdr+=64;
}
r >>= 14;
g >>= 14;
b >>= 14;
if(r<0) r=0; else if(r>65535) r=65535;
if(g<0) g=0; else if(g>65535) g=65535;
if(b<0) b=0; else if(b>65535) b=65535;
RGB48[xx] = r;
RGB48[xx+1] = g;
RGB48[xx+2] = b;
}
xx+=3;
}
}
else
{
float fxpos = xbase;
for(x=0;x<width; x++) //RGB
{
int gains = 0;
int xp, rmdr;
if(z != 0.0)
{
if(x<holdstart)
{
fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
}
else if(x>holdend)
{
int diff = width - x;
int range = width - holdend;
fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
}
else
{
fxpos += flatxstep;
}
/* fxpos = xbase + xstep * x;//(float)ixpos/(65536.0*(float)width);
if(fxpos >= 0.0 && fxpos <= 1.0)
{
if(z > 0.0)
{
fxpos = 1.8*fxpos - 2.4*fxpos*fxpos + (1.6*fxpos*fxpos*fxpos);
fxpos = fxpos * (z) + (xbase + xstep * x) * (1.0-z);
}
else
{
fxpos = 3.0*fxpos*fxpos - 2.0*fxpos*fxpos*fxpos;
fxpos = fxpos * (-z) + (xbase + xstep * x) * (1.0+z);
}
}
*/
xp = (fxpos * 65536.0*(float)width);
rmdr = 63-((xp>>10) & 63);
xp >>= 16;
}
else
{
xp = ixpos>>16;
rmdr = 63-((ixpos>>10) & 63);
ixpos += ixstep;
}
xp -= 1; // was -2 causing a right shift //DAN20100225
if(xp>4 && xp<width-4)
{
__m64 *src64;
__m64 *dst64;
__m64 sumx16;
__m64 rgbx16;
__m64 gain16;
int linepos = (xp-0)*3; //DAN20102602 -- fix left edge error.
src64 = (__m64 *)&scanline[linepos];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
src64 = (__m64 *)&scanline[linepos+3];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&scanline[linepos+6];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&scanline[linepos+9];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
sumx16 = _mm_slli_pi16(sumx16, 2);
dst64 = (__m64 *)&RGB48[xx];
*dst64 = sumx16;
}
else
{
int i,t,r=0,g=0,b=0;
for(i=0; i<4; i++)
{
if(xp<=0 || xp>= width)
{
/* if(i == 3) //DAN20101112 this code was crashing disparity zoom
{
gains = lanczos[rmdr]>>1;
r += (gains * scanline[(xp-1)*3]);
g += (gains * scanline[(xp-1)*3+1]);
b += (gains * scanline[(xp-1)*3+2]);
}
else */
{
gains += lanczos[rmdr]>>1;
}
}
else
{
gains += lanczos[rmdr]>>1;
r += (gains * scanline[xp*3]);
g += (gains * scanline[xp*3+1]);
b += (gains * scanline[xp*3+2]);
gains = 0;
}
xp++;
rmdr+=64;
}
r >>= 14;
g >>= 14;
b >>= 14;
if(r<0) r=0; else if(r>65535) r=65535;
if(g<0) g=0; else if(g>65535) g=65535;
if(b<0) b=0; else if(b>65535) b=65535;
RGB48[xx] = r;
RGB48[xx+1] = g;
RGB48[xx+2] = b;
}
xx+=3;
}
}
}
/*
memcpy(scanline, RGB48, width*3*2);
{
for(x=0;x<width*3; x+=3) //RGB
{
int r,g,b,xp = ((int)xposf)*3;
xposf += xstepf;
if(xp<0 || xp>= width*3)
{
RGB48[x] = 0;
RGB48[x+1] = 0;
RGB48[x+2] = 0;
}
else
{
r = scanline[xp];
g = scanline[xp+1];
b = scanline[xp+2];
RGB48[x] = r;
RGB48[x+1] = g;
RGB48[x+2] = b;
}
}
}
*/
//_mm_empty();
}
#endif
// Horizontally shift and zoom one RGBA64 scanline (4 x 16-bit components per
// pixel), with optional mirroring, per-eye frame tilt, and a non-uniform
// ("dynamic") horizontal stretch driven by decoder->cfhddata.FrameHDynamic.
// Resampling is a 4-tap filter from the lanczos[] table (64 sub-pixel phases,
// 15-bit coefficients).  'buffer' is scratch space that receives a copy of the
// input line; the result is written back over RGB48.  'line' and 'height' feed
// the per-line rotation term (roffset); 'eye' selects which side the tilt
// scales up versus down.
void RGBA64HoriShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, int height, int line, float hoffset, float roffset, float zoom, int flip, float frameTilt, int eye)
{
	float xposf,xstepf;
	int x;
	//int endofSSEline = 0;
	unsigned short *scanline = (unsigned short *)buffer;
	short *sscanline = (short *)buffer;	// signed view of the same scratch line for W13A data
	float offset = hoffset;

	if(flip)
	{
		// Mirror the RGBA scanline in place, swapping whole 4-component pixels.
		// BUGFIX: the previous code post-incremented ptrR inside the four
		// component swaps and then subtracted only 4, so ptrR never advanced
		// and every left pixel was swapped with the *last* pixel of the line.
		// Use indexed swaps (as in RGBA64HoriShift) and step one full pixel
		// per iteration.
		unsigned short *ptrL = RGB48;
		unsigned short *ptrR = RGB48;
		ptrR += (width*4) - 4;
		for(x=0;x<width/2;x++)
		{
			int t;
			t = ptrL[0];
			ptrL[0] = ptrR[0];
			ptrR[0] = t;
			t = ptrL[1];
			ptrL[1] = ptrR[1];
			ptrR[1] = t;
			t = ptrL[2];
			ptrL[2] = ptrR[2];
			ptrR[2] = t;
			t = ptrL[3];
			ptrL[3] = ptrR[3];
			ptrR[3] = t;
			ptrL += 4;
			ptrR -= 4;
		}
	}

	// Frame tilt scales the zoom in opposite directions for the two eyes.
	if(eye > 0)
	{
		zoom *= 1.0f + frameTilt;
	}
	else
	{
		zoom /= 1.0f + frameTilt;
	}

	// Starting source position: center the zoom window, then apply the
	// horizontal offset and the per-line rotation term.
	xposf = (float)width*(0.5f - 1.0f/(2.0f*zoom) - offset);
	xposf -= width * roffset * 0.5f;
	xposf += line * (width* roffset / ((float)height*zoom));
	xstepf = 1.0f/zoom;

	memcpy(scanline, RGB48, width*4*2);
	{
		//unsigned short zeroline[3] = {0};
		int xx = 0;
		int ixpos = (int)(xposf * 65536.0f);	// 16.16 fixed-point source position
		int ixstep = (int)(xstepf * 65536.0f);	// 16.16 fixed-point source step
		float xbase = xposf / (float)width;
		float xstep = xstepf / (float)width;
		// z != 0 enables the non-uniform stretch; the hold region is
		// degenerate here (holdstart == holdend), so the two ramps meet at
		// the line center.
		float z = (decoder->cfhddata.FrameHDynamic - 1.0f)*2.0f;
		int holdstart = width*5/10; // Use to specify a area of uniform stretch
		int holdend = width*5/10;
		float flatxstep = xstep-z*0.5f*xstep;
		float modified_xstep_avg = (xstep * (float)width - (float)(holdend - holdstart) * flatxstep) / (float)(width - (holdend - holdstart));
		float bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg);
#if MMXSUPPORTED //TODO DANREMOVE
		__m64 overflowprotect = _mm_set1_pi16(0x7fff-0x3fff);
#endif

		// Keep both step sizes non-negative, rebalancing the other ramp so
		// the average step (and therefore the overall zoom) is preserved.
		if(bottomxstep < 0.0)
		{
			bottomxstep = 0.0;
			flatxstep = modified_xstep_avg + modified_xstep_avg;
		}
		if(flatxstep < 0.0)
		{
			flatxstep = 0.0;
			bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg);
		}

		if(decoder->StereoBufferFormat == DECODED_FORMAT_W13A)
		{
			// Signed 13-bit (W13A) path.
			float fxpos = xbase;
			for(x=0;x<width; x++) //RGB
			{
				int gains = 0;
				int xp, rmdr;
				if(z != 0.0)
				{
					// Non-uniform stretch: blend between the two ramp steps
					// on either side of the center.
					if(x<holdstart)
					{
						fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
					}
					else if(x>holdend)
					{
						int diff = width - x;
						int range = width - holdend;
						fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
					}
					else
					{
						fxpos += flatxstep;
					}
					xp = (int)(fxpos * 65536.0f*(float)width);
					rmdr = 63-((xp>>10) & 63);	// 6-bit sub-pixel phase for the filter table
					xp >>= 16;
				}
				else
				{
					xp = ixpos>>16;
					rmdr = 63-((ixpos>>10) & 63);
					ixpos += ixstep;
				}
				xp -= 1;// was -2 causing a right shift //DAN20100225
#if MMXSUPPORTED //TODO DANREMOVE
				if(xp>4 && xp<width-4)
				{
					// Fast path: 4-tap filter on all four components at once.
					__m64 *src64;
					__m64 *dst64;
					__m64 sumx16;
					__m64 rgbx16;
					__m64 gain16;
					int linepos = (xp-1)*4;
					src64 = (__m64 *)&sscanline[linepos];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
					sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					src64 = (__m64 *)&sscanline[linepos+4];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					src64 = (__m64 *)&sscanline[linepos+8];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					src64 = (__m64 *)&sscanline[linepos+12];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					// Saturating add/sub pair clamps negatives to zero before
					// the final scale back up.
					sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
					sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
					sumx16 = _mm_slli_pi16(sumx16, 1);
					dst64 = (__m64 *)&RGB48[xx];
					*dst64 = sumx16;
				}
				else
#endif
				{
					// Edge path: scalar 4-tap; taps outside the line donate
					// their weight to the next in-range tap.
					int i,r=0,g=0,b=0,a=0;
					for(i=0; i<4; i++)
					{
						if(xp<=0 || xp>= width)
						{
							gains += lanczos[rmdr]>>1;
						}
						else
						{
							gains += lanczos[rmdr]>>1;
							r += (gains * sscanline[xp*4]);
							g += (gains * sscanline[xp*4+1]);
							b += (gains * sscanline[xp*4+2]);
							a += (gains * sscanline[xp*4+3]);
							gains = 0;
						}
						xp++;
						rmdr+=64;
					}
					r >>= 14;
					g >>= 14;
					b >>= 14;
					a >>= 14;
					if(r<0) r=0; else if(r>65535) r=65535;
					if(g<0) g=0; else if(g>65535) g=65535;
					if(b<0) b=0; else if(b>65535) b=65535;
					if(a<0) a=0; else if(a>65535) a=65535;
					RGB48[xx] = r;
					RGB48[xx+1] = g;
					RGB48[xx+2] = b;
					RGB48[xx+3] = a;
				}
				xx+=4;
			}
		}
		else
		{
			// Unsigned 16-bit path: samples are pre-shifted down one bit so
			// the signed MMX multiplies stay in range.
			float fxpos = xbase;
			for(x=0;x<width; x++) //RGB
			{
				int gains = 0;
				int xp, rmdr;
				if(z != 0.0)
				{
					if(x<holdstart)
					{
						fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
					}
					else if(x>holdend)
					{
						int diff = width - x;
						int range = width - holdend;
						fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
					}
					else
					{
						fxpos += flatxstep;
					}
					xp = (int)(fxpos * 65536.0f*(float)width);
					rmdr = 63-((xp>>10) & 63);
					xp >>= 16;
				}
				else
				{
					xp = ixpos>>16;
					rmdr = 63-((ixpos>>10) & 63);
					ixpos += ixstep;
				}
				xp -= 1; // was -2 causing a right shift //DAN20100225
#if MMXSUPPORTED //TODO DANREMOVE
				if(xp>4 && xp<width-4)
				{
					__m64 *src64;
					__m64 *dst64;
					__m64 sumx16;
					__m64 rgbx16;
					__m64 gain16;
					int linepos = (xp-0)*4; //DAN20102602 -- fix left edge error.
					src64 = (__m64 *)&scanline[linepos];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					src64 = (__m64 *)&scanline[linepos+4];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					src64 = (__m64 *)&scanline[linepos+8];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					src64 = (__m64 *)&scanline[linepos+12];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
					sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
					sumx16 = _mm_slli_pi16(sumx16, 2);
					dst64 = (__m64 *)&RGB48[xx];
					*dst64 = sumx16;
				}
				else
#endif
				{
					int i,r=0,g=0,b=0,a=0;
					for(i=0; i<4; i++)
					{
						if(xp<=0 || xp>= width)
						{
							gains += lanczos[rmdr]>>1;
						}
						else
						{
							gains += lanczos[rmdr]>>1;
							r += (gains * scanline[xp*4]);
							g += (gains * scanline[xp*4+1]);
							b += (gains * scanline[xp*4+2]);
							a += (gains * scanline[xp*4+3]);
							gains = 0;
						}
						xp++;
						rmdr+=64;
					}
					r >>= 14;
					g >>= 14;
					b >>= 14;
					a >>= 14;
					if(r<0) r=0; else if(r>65535) r=65535;
					if(g<0) g=0; else if(g>65535) g=65535;
					if(b<0) b=0; else if(b>65535) b=65535;
					if(a<0) a=0; else if(a>65535) a=65535;
					RGB48[xx] = r;
					RGB48[xx+1] = g;
					RGB48[xx+2] = b;
					RGB48[xx+3] = a;
				}
				xx+=4;
			}
		}
	}
#if MMXSUPPORTED //TODO DANREMOVE
	//_mm_empty();
#endif
}
// Black out (mask) one edge of a scanline to hide the bare strip exposed by a
// stereo horizontal shift, feathering the last masked pixel by the fractional
// part of the mask width.  'windowMask' is the mask width as a fraction of the
// line width; its sign selects which eye/edge is masked (negative swaps the
// channel).  'channel' 0 masks from the left edge, otherwise from the right.
// Handles both 3-component (6-byte) and 4-component (8-byte) pixel layouts,
// with a separate branch for signed 13-bit formats.
void RGB48WindowMask(DECODER *decoder, unsigned short *RGB48, int width, int channel, float windowMask)
{
	float line = (float)width * fabsf(windowMask);
	int pixelbytes = 6;
	float frac = (float)(line-(float)((int)line));
	int iline = (int)line;

	switch(decoder->StereoBufferFormat)
	{
	case DECODED_FORMAT_RGB32:
	case DECODED_FORMAT_W13A:
	case DECODED_FORMAT_RG64:
		pixelbytes = 8;
		break;
	}

	// BUGFIX: clamp the whole-pixel mask width so the feathered edge pixel
	// (written at index iline from the left, or just before index
	// width-iline from the right) always stays inside the row.  Previously a
	// |windowMask| >= 1.0 wrote one pixel past the row.
	if(iline > width - 1)
		iline = width - 1;
	if(iline < 0)
		iline = 0;

	if(decoder->StereoBufferFormat == DECODED_FORMAT_W13A ||
		decoder->StereoBufferFormat == DECODED_FORMAT_WP13) // signed math needed
	{
		short *ptrL = (short *)RGB48;
		short *ptrR = (short *)RGB48;
		if(windowMask < 0)
			channel = channel == 0 ? 1 : 0;
		if(pixelbytes == 6)
		{
			if(channel == 0)
			{
				// Zero the left strip, then fade the first surviving pixel.
				memset(ptrL, 0, 6*iline);
				ptrL += (iline*3);
				ptrL[0] = (int)((float)ptrL[0] * (1.0-frac));
				ptrL[1] = (int)((float)ptrL[1] * (1.0-frac));
				ptrL[2] = (int)((float)ptrL[2] * (1.0-frac));
			}
			else
			{
				// Zero the right strip, then fade the last surviving pixel.
				ptrR += ((width-iline)*3);
				memset(ptrR, 0, 6*iline);
				ptrR[-1] = (int)((float)ptrR[-1] * (1.0-frac));
				ptrR[-2] = (int)((float)ptrR[-2] * (1.0-frac));
				ptrR[-3] = (int)((float)ptrR[-3] * (1.0-frac));
			}
		}
		else
		{
			if(channel == 0)
			{
				memset(ptrL, 0, 8*iline);
				ptrL += (iline*4);
				ptrL[0] = (int)((float)ptrL[0] * (1.0-frac));
				ptrL[1] = (int)((float)ptrL[1] * (1.0-frac));
				ptrL[2] = (int)((float)ptrL[2] * (1.0-frac));
				ptrL[3] = (int)((float)ptrL[3] * (1.0-frac));
			}
			else
			{
				ptrR += ((width-iline)*4);
				memset(ptrR, 0, 8*iline);
				ptrR[-1] = (int)((float)ptrR[-1] * (1.0-frac));
				ptrR[-2] = (int)((float)ptrR[-2] * (1.0-frac));
				ptrR[-3] = (int)((float)ptrR[-3] * (1.0-frac));
				ptrR[-4] = (int)((float)ptrR[-4] * (1.0-frac));
			}
		}
	}
	else
	{
		// Unsigned 16-bit formats: identical layout logic, unsigned pointers.
		unsigned short *ptrL = RGB48;
		unsigned short *ptrR = RGB48;
		if(windowMask < 0)
			channel = channel == 0 ? 1 : 0;
		if(pixelbytes == 6)
		{
			if(channel == 0)
			{
				memset(ptrL, 0, 6*iline);
				ptrL += (iline*3);
				ptrL[0] = (int)((float)ptrL[0] * (1.0-frac));
				ptrL[1] = (int)((float)ptrL[1] * (1.0-frac));
				ptrL[2] = (int)((float)ptrL[2] * (1.0-frac));
			}
			else
			{
				ptrR += ((width-iline)*3);
				memset(ptrR, 0, 6*iline);
				ptrR[-1] = (int)((float)ptrR[-1] * (1.0-frac));
				ptrR[-2] = (int)((float)ptrR[-2] * (1.0-frac));
				ptrR[-3] = (int)((float)ptrR[-3] * (1.0-frac));
			}
		}
		else
		{
			if(channel == 0)
			{
				memset(ptrL, 0, 8*iline);
				ptrL += (iline*4);
				ptrL[0] = (int)((float)ptrL[0] * (1.0-frac));
				ptrL[1] = (int)((float)ptrL[1] * (1.0-frac));
				ptrL[2] = (int)((float)ptrL[2] * (1.0-frac));
				ptrL[3] = (int)((float)ptrL[3] * (1.0-frac));
			}
			else
			{
				ptrR += ((width-iline)*4);
				memset(ptrR, 0, 8*iline);
				ptrR[-1] = (int)((float)ptrR[-1] * (1.0-frac));
				ptrR[-2] = (int)((float)ptrR[-2] * (1.0-frac));
				ptrR[-3] = (int)((float)ptrR[-3] * (1.0-frac));
				ptrR[-4] = (int)((float)ptrR[-4] * (1.0-frac));
			}
		}
	}
}
// Shift one RGB48 scanline (3 x 16-bit components per pixel) horizontally by
// 'offset' (a signed fraction of the line width), using a 4-tap polyphase
// filter whose coefficients come from the gains[SUBPIXEL][4] table.  'flip'
// mirrors the line in place first.  'buffer' is scratch space large enough for
// the line plus padding; output is written back over RGB48.
// NOTE(review): the SSE loop processes 8 shorts (2 2/3 pixels) per iteration
// and relies on the zero padding written below to run past the visible end of
// the line safely -- confirm buffer sizing against callers.
void RGB48HoriShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, float offset, int flip)
{
	float xposf,remainf;
	int xposi,tablepos,x;
	int gainA,gainB,gainC,gainD;
	//int endofSSEline = 0;
	unsigned short *scanline = (unsigned short *)buffer;
	int neg = 0,shift = 0;
	__m128i l1,l2,l3,gA,gB,gC,gD,o128,t1,t2;
	__m128i *line128, *outline128;

	if(flip)
	{
		// Mirror the scanline in place, swapping whole pixels end-to-end.
		unsigned short *ptrL = RGB48;
		unsigned short *ptrR = RGB48;
		ptrR += (width*3) - 3;
		for(x=0;x<width/2;x++)
		{
			int t1,t2,t3;
			t1 = ptrL[0];
			ptrL[0] = ptrR[0];
			ptrR[0] = t1;
			t2 = ptrL[1];
			ptrL[1] = ptrR[1];
			ptrR[1] = t2;
			t3 = ptrL[2];
			ptrL[2] = ptrR[2];
			ptrR[2] = t3;
			ptrL += 3;
			ptrR -= 3;
		}
	}

	if(offset < 0.0)
		neg = 1;

	// Split the shift into a whole-pixel part (xposi) and a sub-pixel part
	// (tablepos indexes the filter-coefficient table).
	xposf = width * offset;
	xposi = (int)floorf(xposf);
	remainf = xposf - (float)xposi;
	tablepos = (int)(remainf*(float)SUBPIXEL);
	xposi = abs(xposi);

	if(xposi==0 && tablepos == 0)
		return; // no move required

	gainA = gains[tablepos][0];
	gainB = gains[tablepos][1];
	gainC = gains[tablepos][2];
	gainD = gains[tablepos][3];

	// Build a whole-pixel-shifted, zero-padded copy of the line in scanline[]
	// so the 4-tap filter below needs no per-pixel edge tests.
	if(neg == 0)
	{
		// Positive shift: pad xposi+2 black pixels on the left.
		unsigned short *ptr = scanline;
		int nwidth = width-xposi+16;
		if(nwidth > width)
			nwidth = width;
		for(x=0;x<xposi+2;x++)
		{
			*ptr++ = 0;//r
			*ptr++ = 0;//g
			*ptr++ = 0;//b
		}
		memcpy(ptr, RGB48, (nwidth)*3*2);
		ptr += (nwidth)*3;
		for(x=0;x<16;x++)
		{
			*ptr++ = 0;//r
			*ptr++ = 0;//g
			*ptr++ = 0;//b
		}
	}
	else
	{
		// Negative shift: drop xposi pixels from the left (keeping two lead-in
		// pixels for the filter taps) and pad black on the right.
		unsigned short *ptr = scanline;
		for(x=0;x<2;x++)
		{
			if(x+xposi-2>=0)
			{
				*ptr++ = RGB48[(x+xposi-2)*3];//r
				*ptr++ = RGB48[(x+xposi-2)*3+1];//g
				*ptr++ = RGB48[(x+xposi-2)*3+2];//b
			}
			else
			{
				*ptr++ = 0;//r
				*ptr++ = 0;//g
				*ptr++ = 0;//b
			}
		}
		memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2);
		ptr += (width-xposi)*3;
		for(x=0;x<xposi+16;x++)
		{
			*ptr++ = 0;//r
			*ptr++ = 0;//g
			*ptr++ = 0;//b
		}
	}

	gA = _mm_set1_epi16(gainA);
	gB = _mm_set1_epi16(gainB);
	gC = _mm_set1_epi16(gainC);
	gD = _mm_set1_epi16(gainD);

	line128 = (__m128i *)&scanline[0];
	//outline128 = line128;
	outline128 = (__m128i *)&RGB48[0];

	//l1 = load128;//r1,g1,b1,r2,g2,b2,r3,g3,
	//l2 = load128;//b3,r4,g4,b4,r5,g5,b5,r6
	//l3 = load128;//g6,b6,r7,g7,b7,r8,g8,b8
	// WP13 is already signed 13-bit; other formats are shifted down 3 bits so
	// the signed 16-bit multiplies cannot overflow (restored after filtering).
	if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13)
	{
		l1 = _mm_loadu_si128(line128++);
		l2 = _mm_loadu_si128(line128++);
		l3 = _mm_loadu_si128(line128++);
		shift = 0;
	}
	else
	{
		l1 = _mm_loadu_si128(line128++);
		l1 = _mm_srli_epi16(l1,3); //13-bit unsigned
		l2 = _mm_loadu_si128(line128++);
		l2 = _mm_srli_epi16(l2,3); //13-bit unsigned
		l3 = _mm_loadu_si128(line128++);
		l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
		shift = 3;
	}

	// 4-tap filter, 8 shorts per iteration.  Each tap's operand vector is the
	// current window advanced by one whole pixel (3 shorts), assembled from
	// l1/l2/l3 by byte-shifting lanes across the 128-bit registers.
	for(x=0;x<width*3; x+=8)
	{
		//o=l1* gainA
		o128 = _mm_mulhi_epi16(l1, gA);

		//t1 = l1<<3*16 //t1 = r2,g2,b2,r3,g3, 0 0 0
		//t2 = l2>>16*5 //t2 = 0 0 0 0 0 b3,r4,g4
		//t1 += t2; //t1 = r2,g2,b2,r3,g3,b3,r4,g4
		//l1 = t1 //l1 = r2,g2,b2,r3,g3,b3,r4,g4
		//t1 *= gainB
		//o += t1
		t1 = _mm_srli_si128(l1,3*2);
		t2 = _mm_slli_si128(l2,5*2);
		t1 = _mm_adds_epi16(t1,t2);
		l1 = t1;
		t1 = _mm_mulhi_epi16(t1, gB);
		o128 = _mm_adds_epi16(o128,t1);

		//t1 = l1<<3*16 //t1 = r3,g3,b3,r4,g4 0 0 0
		//t2 = l2<<3*16; //t2 = b4,r5,g5,b5,r6 0 0 0
		//t2 >>= 5*16; //t2 = 0 0 0 0 0 b4,r5,g5
		//t1 += t2 //t1 = r3,g3,b3,r4,g4,b4,r5,g5
		//l1 = t1 //l1 = r3,g3,b3,r4,g4,b4,r5,g5
		//t1 *= gainC
		//o += t1
		t1 = _mm_srli_si128(l1,3*2);
		t2 = _mm_srli_si128(l2,3*2);
		t2 = _mm_slli_si128(t2,5*2);
		t1 = _mm_adds_epi16(t1,t2);
		l1 = t1;
		t1 = _mm_mulhi_epi16(t1, gC);
		o128 = _mm_adds_epi16(o128,t1);

		//t1 = l1<<3*16 //t1 = r4,g4,b4,r5,g5 0 0 0
		//t2 = l2<<6*16 //t2 = b5,r6 0 0 0 0 0 0
		//t2 >>= 5 * 16; //t2 = 0 0 0 0 0 b5,r6 0
		//t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6, 0
		//t2 = l3>>7*16 //t2 = 0 0 0 0 0 0 0 g6
		//t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6,g6
		//t1 *= gainD
		//o += t1
		t1 = _mm_srli_si128(l1,3*2);
		t2 = _mm_srli_si128(l2,6*2);
		t2 = _mm_slli_si128(t2,5*2);
		t1 = _mm_adds_epi16(t1,t2);
		t2 = _mm_slli_si128(l3,7*2);
		t1 = _mm_adds_epi16(t1,t2);
		t1 = _mm_mulhi_epi16(t1, gD);
		o128 = _mm_adds_epi16(o128,t1);

		// Slide the three-register window forward and load the next 8 shorts.
		l1 = l2;
		l2 = l3;
		l3 = _mm_loadu_si128(line128++);
		if(shift)
		{
			l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
			// Saturating add/sub clamps negatives to zero, then restore scale.
			o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
			o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
			o128 = _mm_slli_epi16(o128,4);
		}
		else
		{
			// upper limit to 32767
			o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
			o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
			o128 = _mm_slli_epi16(o128,1);
		}
		_mm_storeu_si128(outline128++, o128);
	}
}
// Shift one RGBA64 scanline (4 x 16-bit components per pixel) horizontally by
// 'offset' (a signed fraction of the line width), using a 4-tap polyphase
// filter whose coefficients come from the gains[SUBPIXEL][4] table.  'flip'
// mirrors the line in place first.  'buffer' is scratch space large enough
// for the line plus padding; output is written back over RGB48.  The RGBA
// variant of RGB48HoriShift: since the pixel stride (4 shorts) divides the
// 8-lane SSE width, the per-tap lane shuffles are simpler here.
void RGBA64HoriShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, float offset, int flip)
{
	float xposf,remainf;
	int xposi,tablepos,x;
	int gainA,gainB,gainC,gainD;
	//int endofSSEline = 0;
	unsigned short *scanline = (unsigned short *)buffer;
	int neg = 0,shift = 0;
	__m128i l1,l2,l3,gA,gB,gC,gD,o128,t1,t2;
	__m128i *line128, *outline128;

	if(flip)
	{
		// Mirror the scanline in place, swapping whole 4-component pixels.
		unsigned short *ptrL = RGB48;
		unsigned short *ptrR = RGB48;
		ptrR += (width*4) - 4;
		for(x=0;x<width/2;x++)
		{
			int t1,t2,t3,t4;
			t1 = ptrL[0];
			ptrL[0] = ptrR[0];
			ptrR[0] = t1;
			t2 = ptrL[1];
			ptrL[1] = ptrR[1];
			ptrR[1] = t2;
			t3 = ptrL[2];
			ptrL[2] = ptrR[2];
			ptrR[2] = t3;
			t4 = ptrL[3]; // BUGFIX: was ptrL[2], which wrote the left pixel's
			              // (already-swapped) blue into the right pixel's alpha
			ptrL[3] = ptrR[3];
			ptrR[3] = t4;
			ptrL += 4;
			ptrR -= 4;
		}
	}

	if(offset < 0.0)
		neg = 1;

	// Split the shift into a whole-pixel part (xposi) and a sub-pixel part
	// (tablepos indexes the filter-coefficient table).
	xposf = width * offset;
	xposi = (int)floorf(xposf);
	remainf = xposf - (float)xposi;
	tablepos = (int)(remainf*(float)SUBPIXEL);
	xposi = abs(xposi);

	if(xposi==0 && tablepos == 0)
		return; // no move required

	gainA = gains[tablepos][0];
	gainB = gains[tablepos][1];
	gainC = gains[tablepos][2];
	gainD = gains[tablepos][3];

	// Build a whole-pixel-shifted, zero-padded copy of the line in scanline[]
	// so the 4-tap filter below needs no per-pixel edge tests.
	if(neg == 0)
	{
		// Positive shift: pad xposi+2 black pixels on the left.
		unsigned short *ptr = scanline;
		int nwidth = width-xposi+16;
		if(nwidth > width)
			nwidth = width;
		for(x=0;x<xposi+2;x++)
		{
			*ptr++ = 0;//r
			*ptr++ = 0;//g
			*ptr++ = 0;//b
			*ptr++ = 0;//a
		}
		memcpy(ptr, RGB48, (nwidth)*4*2);
		ptr += (nwidth)*4;
		for(x=0;x<16;x++)
		{
			*ptr++ = 0;//r
			*ptr++ = 0;//g
			*ptr++ = 0;//b
			*ptr++ = 0;//a
		}
	}
	else
	{
		// Negative shift: drop xposi pixels from the left (keeping two lead-in
		// pixels for the filter taps) and pad black on the right.
		unsigned short *ptr = scanline;
		for(x=0;x<2;x++)
		{
			if(x+xposi-2>=0)
			{
				*ptr++ = RGB48[(x+xposi-2)*4];//r
				*ptr++ = RGB48[(x+xposi-2)*4+1];//g
				*ptr++ = RGB48[(x+xposi-2)*4+2];//b
				*ptr++ = RGB48[(x+xposi-2)*4+3];//a
			}
			else
			{
				*ptr++ = 0;//r
				*ptr++ = 0;//g
				*ptr++ = 0;//b
				*ptr++ = 0;//a
			}
		}
		memcpy(ptr, &RGB48[xposi*4], (width-xposi)*4*2);
		ptr += (width-xposi)*4;
		for(x=0;x<xposi+16;x++)
		{
			*ptr++ = 0;//r
			*ptr++ = 0;//g
			*ptr++ = 0;//b
			*ptr++ = 0;//a
		}
	}

	gA = _mm_set1_epi16(gainA);
	gB = _mm_set1_epi16(gainB);
	gC = _mm_set1_epi16(gainC);
	gD = _mm_set1_epi16(gainD);

	line128 = (__m128i *)&scanline[0];
	//outline128 = line128;
	outline128 = (__m128i *)&RGB48[0];

	//l1 = load128;//r1,g1,b1,a1,r2,g2,b2,a2,
	//l2 = load128;//r3,g3,b3,a3,r4,g4,b4,a4,
	//l3 = load128;//r5,g5,b5,a5,r6,g6,b6,a6,
	//l4 = load128;//r7,g7,b7,a7,r8,g8,b8,a8,
	// WP13/W13A are already signed 13-bit; other formats are shifted down 3
	// bits so the signed 16-bit multiplies cannot overflow (restored after
	// filtering).
	if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A)
	{
		l1 = _mm_loadu_si128(line128++);
		l2 = _mm_loadu_si128(line128++);
		l3 = _mm_loadu_si128(line128++);
		shift = 0;
	}
	else
	{
		l1 = _mm_loadu_si128(line128++);
		l1 = _mm_srli_epi16(l1,3); //13-bit unsigned
		l2 = _mm_loadu_si128(line128++);
		l2 = _mm_srli_epi16(l2,3); //13-bit unsigned
		l3 = _mm_loadu_si128(line128++);
		l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
		shift = 3;
	}

	// 4-tap filter, 8 shorts (2 RGBA pixels) per iteration.  Each tap's
	// operand vector is the current window advanced by one whole pixel
	// (4 shorts), assembled from l1/l2/l3 by byte-shifting lanes.
	for(x=0;x<width*4; x+=8)
	{
		//o=l1* gainA
		o128 = _mm_mulhi_epi16(l1, gA);

		//t1 = l1<<4*16 //t1 = r2,g2,b2,a2,0, 0 0 0
		//t2 = l2>>4*16 //t2 = 0 0 0 0 r3,g3,b3,a4
		//t1 += t2; //t1 = r2,g2,b2,a2,r3,g3,b3,a4
		//l1 = t1 //l1 = r2,g2,b2,a2,r3,g3,b3,a4
		//t1 *= gainB
		//o += t1
		t1 = _mm_srli_si128(l1,4*2);
		t2 = _mm_slli_si128(l2,4*2);
		t1 = _mm_adds_epi16(t1,t2);
		l1 = t1;
		t1 = _mm_mulhi_epi16(t1, gB);
		o128 = _mm_adds_epi16(o128,t1);

		//t1 = l1<<4*16 //t1 = r3,g3,b3,a3, 0 0 0 0
		//t2 = l2<<4*16;//t2 = r4,g4,b4,a4, 0 0 0 0
		//t2 >>= 4*16; //t2 = 0 0 0 0 r4,g4,b4,a4
		//t1 += t2 //t1 = r3,g3,b3,a4,r4,g4,b4,a4
		//l1 = t1 //l1 = r3,g3,b3,a4,r4,g4,b4,a4
		//t1 *= gainC
		//o += t1
		t1 = _mm_srli_si128(l1,4*2);
		t2 = _mm_srli_si128(l2,4*2);
		t2 = _mm_slli_si128(t2,4*2);
		t1 = _mm_adds_epi16(t1,t2);
		l1 = t1;
		t1 = _mm_mulhi_epi16(t1, gC);
		o128 = _mm_adds_epi16(o128,t1);

		//t1 = l1<<4*16 //t1 = r4,g4,b4,a4,0 0 0 0
		//t2 = l3>>4*16 //t2 = 0 0 0 0 r5,g5,b5,a5
		//t1 += t2 //t1 = r4,g4,b4,a4,r5,g5,b5,a5
		//t1 *= gainD
		//o += t1
		t1 = _mm_srli_si128(l1,4*2);
		t2 = _mm_slli_si128(l3,4*2);
		t1 = _mm_adds_epi16(t1,t2);
		t1 = _mm_mulhi_epi16(t1, gD);
		o128 = _mm_adds_epi16(o128,t1);

		// Slide the three-register window forward and load the next 8 shorts.
		l1 = l2;
		l2 = l3;
		l3 = _mm_loadu_si128(line128++);
		if(shift)
		{
			l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
			// Saturating add/sub clamps negatives to zero, then restore scale.
			o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
			o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
			o128 = _mm_slli_epi16(o128,4);
		}
		else
		{
			// upper limit to 32767
			o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
			o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
			o128 = _mm_slli_epi16(o128,1);
		}
		_mm_storeu_si128(outline128++, o128);
	}
}
// Shift the R, G, and B planes of one RGB48 scanline horizontally by
// independent offsets (each a signed fraction of the line width), with
// independent per-plane mirroring -- used for anaglyph-style per-channel
// convergence.  Each plane gets its own 4-tap polyphase filter phase from the
// gains[SUBPIXEL][4] table.  'buffer' is scratch space for the padded working
// line; output is written back over RGB48.
// NOTE(review): the early-out below tests only the red offset
// (Rxposi/Rtablepos); a zero red shift skips the green and blue shifts as
// well -- confirm this is intended.
void RGB48HoriShiftAnaglyph(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width,
	float offsetR, float offsetG, float offsetB ,
	int flipR, int flipG, int flipB)
{
	float Rxposf,Rremainf;
	int Rxposi,Rtablepos;
	float Gxposf,Gremainf;
	int Gxposi,Gtablepos;
	float Bxposf,Bremainf;
	int Bxposi,Btablepos;
	int x;
	int RgainA,RgainB,RgainC,RgainD;
	int GgainA,GgainB,GgainC,GgainD;
	int BgainA,BgainB,BgainC,BgainD;
	//int endofSSEline = 0;
	unsigned short *scanline = (unsigned short *)buffer;
	int negR = 0;
	int negG = 0;
	int negB = 0;
	int shift = 0;
	__m128i l1,l2,l3,o128,t1,t2;
	__m128i *line128, *outline128;
	__m128i gA1,gB1,gC1,gD1,gA2,gB2,gC2,gD2,gA3,gB3,gC3,gD3;

	// Per-plane mirror: swap only that plane's component between mirrored
	// pixel positions, leaving the other two planes untouched.
	if(flipR)
	{
		unsigned short *ptrL = RGB48;
		unsigned short *ptrR = RGB48;
		ptrR += (width*3) - 3;
		for(x=0;x<width/2;x++)
		{
			int t;
			t = *ptrL;
			*ptrL = *ptrR;
			*ptrR = t;
			ptrL += 3;
			ptrR -= 3;
		}
	}
	if(flipG)
	{
		unsigned short *ptrL = &RGB48[1];
		unsigned short *ptrR = &RGB48[1];
		ptrR += (width*3) - 3;
		for(x=0;x<width/2;x++)
		{
			int t;
			t = *ptrL;
			*ptrL = *ptrR;
			*ptrR = t;
			ptrL += 3;
			ptrR -= 3;
		}
	}
	if(flipB)
	{
		unsigned short *ptrL = &RGB48[2];
		unsigned short *ptrR = &RGB48[2];
		ptrR += (width*3) - 3;
		for(x=0;x<width/2;x++)
		{
			int t;
			t = *ptrL;
			*ptrL = *ptrR;
			*ptrR = t;
			ptrL += 3;
			ptrR -= 3;
		}
	}

	if(offsetR < 0.0)
		negR = 1;
	if(offsetG < 0.0)
		negG = 1;
	if(offsetB < 0.0)
		negB = 1;

	// Split each plane's shift into a whole-pixel part (xposi) and a
	// sub-pixel part (tablepos indexes the filter-coefficient table).
	Rxposf = width * offsetR;
	Rxposi = (int)floorf(Rxposf);
	Rremainf = Rxposf - (float)Rxposi;
	Rtablepos = (int)(Rremainf*(float)SUBPIXEL);
	Gxposf = width * offsetG;
	Gxposi = (int)floorf(Gxposf);
	Gremainf = Gxposf - (float)Gxposi;
	Gtablepos = (int)(Gremainf*(float)SUBPIXEL);
	Bxposf = width * offsetB;
	Bxposi = (int)floorf(Bxposf);
	Bremainf = Bxposf - (float)Bxposi;
	Btablepos = (int)(Bremainf*(float)SUBPIXEL);
	Rxposi = abs(Rxposi);
	Gxposi = abs(Gxposi);
	Bxposi = abs(Bxposi);

	if(Rxposi==0 && Rtablepos == 0)
		return; // no move required

	RgainA = gains[Rtablepos][0];
	RgainB = gains[Rtablepos][1];
	RgainC = gains[Rtablepos][2];
	RgainD = gains[Rtablepos][3];
	GgainA = gains[Gtablepos][0];
	GgainB = gains[Gtablepos][1];
	GgainC = gains[Gtablepos][2];
	GgainD = gains[Gtablepos][3];
	BgainA = gains[Btablepos][0];
	BgainB = gains[Btablepos][1];
	BgainC = gains[Btablepos][2];
	BgainD = gains[Btablepos][3];

	// Build the red plane of the padded working line: shift by Rxposi whole
	// pixels with zero fill, touching only every third short.
	if(negR == 0)
	{
		unsigned short *ptr = scanline;
		int nwidth = width-Rxposi+16;
		if(nwidth > width)
			nwidth = width;
		for(x=0;x<Rxposi+2;x++)
		{
			*ptr++ = 0;//r
			ptr++;//g
			ptr++;//b
		}
		for(x=0;x<nwidth;x++)
		{
			*ptr++ = RGB48[x*3];//r
			ptr++;//g
			ptr++;//b
		}
		for(x=0;x<16;x++)
		{
			*ptr++ = 0;//r
			ptr++;//g
			ptr++;//b
		}
	}
	else
	{
		unsigned short *ptr = scanline;
		for(x=0;x<2;x++)
		{
			if(x+Rxposi-2>=0)
			{
				*ptr++ = RGB48[(x+Rxposi-2)*3];//r
				ptr++;//g
				ptr++;//b
			}
			else
			{
				*ptr++ = 0;//r
				ptr++;//g
				ptr++;//b
			}
		}
		//memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2);
		//ptr += (width-xposi)*3;
		for(x=Rxposi;x<width;x++)
		{
			*ptr++ = RGB48[x*3];//r
			ptr++;//g
			ptr++;//b
		}
		for(x=0;x<Rxposi+16;x++)
		{
			*ptr++ = 0;//r
			ptr++;//g
			ptr++;//b
		}
	}

	// Green plane: same scheme, shifted by Gxposi.
	if(negG == 0)
	{
		unsigned short *ptr = scanline;
		int nwidth = width-Gxposi+16;
		if(nwidth > width)
			nwidth = width;
		for(x=0;x<Gxposi+2;x++)
		{
			ptr++;//r
			*ptr++ = 0;//g
			ptr++;//b
		}
		for(x=0;x<nwidth;x++)
		{
			ptr++;//r
			*ptr++ = RGB48[x*3+1];//g
			ptr++;//b
		}
		for(x=0;x<16;x++)
		{
			ptr++;//r
			*ptr++ = 0;//g
			ptr++;//b
		}
	}
	else
	{
		unsigned short *ptr = scanline;
		for(x=0;x<2;x++)
		{
			if(x+Gxposi-2>=0)
			{
				ptr++;//r
				*ptr++ = RGB48[(x+Gxposi-2)*3+1];//g
				ptr++;//b
			}
			else
			{
				ptr++;//r
				*ptr++ = 0;//g
				ptr++;//b
			}
		}
		//memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2);
		//ptr += (width-xposi)*3;
		for(x=Gxposi;x<width;x++)
		{
			ptr++;//r
			*ptr++ = RGB48[x*3+1];//g
			ptr++;//b
		}
		for(x=0;x<Gxposi+16;x++)
		{
			ptr++;//r
			*ptr++ = 0;//g
			ptr++;//b
		}
	}

	// Blue plane: same scheme, shifted by Bxposi.
	if(negB == 0)
	{
		unsigned short *ptr = scanline;
		int nwidth = width-Bxposi+16;
		if(nwidth > width)
			nwidth = width;
		for(x=0;x<Bxposi+2;x++)
		{
			ptr++;//r
			ptr++;//g
			*ptr++ = 0;//b
		}
		for(x=0;x<nwidth;x++)
		{
			ptr++;//r
			ptr++;//g
			*ptr++ = RGB48[x*3+2];//b
		}
		for(x=0;x<16;x++)
		{
			ptr++;//r
			ptr++;//g
			*ptr++ = 0;//b
		}
	}
	else
	{
		unsigned short *ptr = scanline;
		for(x=0;x<2;x++)
		{
			if(x+Bxposi-2>=0)
			{
				ptr++;//r
				ptr++;//g
				*ptr++ = RGB48[(x+Bxposi-2)*3+2];//b
			}
			else
			{
				ptr++;//r
				ptr++;//g
				*ptr++ = 0;//b
			}
		}
		//memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2);
		//ptr += (width-xposi)*3;
		for(x=Bxposi;x<width;x++)
		{
			ptr++;//r
			ptr++;//g
			*ptr++ = RGB48[x*3+2];//b
		}
		for(x=0;x<Bxposi+16;x++)
		{
			ptr++;//r
			ptr++;//g
			*ptr++ = 0;//b
		}
	}

	// Per-lane gain vectors.  The RGB triplet pattern repeats every 24 shorts
	// (three 8-lane vectors), so three pre-rotated variants of each tap's
	// gain vector are built and rotated through inside the loop.
	gA1 = _mm_set_epi16(RgainA,GgainA,BgainA,RgainA,GgainA,BgainA,RgainA,GgainA);
	gA2 = _mm_set_epi16(BgainA,RgainA,GgainA,BgainA,RgainA,GgainA,BgainA,RgainA);
	gA3 = _mm_set_epi16(GgainA,BgainA,RgainA,GgainA,BgainA,RgainA,GgainA,BgainA);
	gB1 = _mm_set_epi16(RgainB,GgainB,BgainB,RgainB,GgainB,BgainB,RgainB,GgainB);
	gB2 = _mm_set_epi16(BgainB,RgainB,GgainB,BgainB,RgainB,GgainB,BgainB,RgainB);
	gB3 = _mm_set_epi16(GgainB,BgainB,RgainB,GgainB,BgainB,RgainB,GgainB,BgainB);
	gC1 = _mm_set_epi16(RgainC,GgainC,BgainC,RgainC,GgainC,BgainC,RgainC,GgainC);
	gC2 = _mm_set_epi16(BgainC,RgainC,GgainC,BgainC,RgainC,GgainC,BgainC,RgainC);
	gC3 = _mm_set_epi16(GgainC,BgainC,RgainC,GgainC,BgainC,RgainC,GgainC,BgainC);
	gD1 = _mm_set_epi16(RgainD,GgainD,BgainD,RgainD,GgainD,BgainD,RgainD,GgainD);
	gD2 = _mm_set_epi16(BgainD,RgainD,GgainD,BgainD,RgainD,GgainD,BgainD,RgainD);
	gD3 = _mm_set_epi16(GgainD,BgainD,RgainD,GgainD,BgainD,RgainD,GgainD,BgainD);

	line128 = (__m128i *)&scanline[0];
	//outline128 = line128;
	outline128 = (__m128i *)&RGB48[0];

	//l1 = load128;//r1,g1,b1,r2,g2,b2,r3,g3,
	//l2 = load128;//b3,r4,g4,b4,r5,g5,b5,r6
	//l3 = load128;//g6,b6,r7,g7,b7,r8,g8,b8
	// WP13 is already signed 13-bit; other formats are shifted down 3 bits so
	// the signed 16-bit multiplies cannot overflow (restored after filtering).
	if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13)
	{
		l1 = _mm_loadu_si128(line128++);
		l2 = _mm_loadu_si128(line128++);
		l3 = _mm_loadu_si128(line128++);
		shift = 0;
	}
	else
	{
		l1 = _mm_loadu_si128(line128++);
		l1 = _mm_srli_epi16(l1,3); //13-bit unsigned
		l2 = _mm_loadu_si128(line128++);
		l2 = _mm_srli_epi16(l2,3); //13-bit unsigned
		l3 = _mm_loadu_si128(line128++);
		l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
		shift = 3;
	}

	// 4-tap filter, 8 shorts per iteration, with per-lane (per-plane) gains.
	for(x=0;x<width*3; x+=8)
	{
		//o=l1* gainA
		o128 = _mm_mulhi_epi16(l1, gA1);

		//t1 = l1<<3*16 //t1 = r2,g2,b2,r3,g3, 0 0 0
		//t2 = l2>>16*5 //t2 = 0 0 0 0 0 b3,r4,g4
		//t1 += t2; //t1 = r2,g2,b2,r3,g3,b3,r4,g4
		//l1 = t1 //l1 = r2,g2,b2,r3,g3,b3,r4,g4
		//t1 *= gainB
		//o += t1
		t1 = _mm_srli_si128(l1,3*2);
		t2 = _mm_slli_si128(l2,5*2);
		t1 = _mm_adds_epi16(t1,t2);
		l1 = t1;
		t1 = _mm_mulhi_epi16(t1, gB1);
		o128 = _mm_adds_epi16(o128,t1);

		//t1 = l1<<3*16 //t1 = r3,g3,b3,r4,g4 0 0 0
		//t2 = l2<<3*16; //t2 = b4,r5,g5,b5,r6 0 0 0
		//t2 >>= 5*16; //t2 = 0 0 0 0 0 b4,r5,g5
		//t1 += t2 //t1 = r3,g3,b3,r4,g4,b4,r5,g5
		//l1 = t1 //l1 = r3,g3,b3,r4,g4,b4,r5,g5
		//t1 *= gainC
		//o += t1
		t1 = _mm_srli_si128(l1,3*2);
		t2 = _mm_srli_si128(l2,3*2);
		t2 = _mm_slli_si128(t2,5*2);
		t1 = _mm_adds_epi16(t1,t2);
		l1 = t1;
		t1 = _mm_mulhi_epi16(t1, gC1);
		o128 = _mm_adds_epi16(o128,t1);

		//t1 = l1<<3*16 //t1 = r4,g4,b4,r5,g5 0 0 0
		//t2 = l2<<6*16 //t2 = b5,r6 0 0 0 0 0 0
		//t2 >>= 5 * 16; //t2 = 0 0 0 0 0 b5,r6 0
		//t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6, 0
		//t2 = l3>>7*16 //t2 = 0 0 0 0 0 0 0 g6
		//t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6,g6
		//t1 *= gainD
		//o += t1
		t1 = _mm_srli_si128(l1,3*2);
		t2 = _mm_srli_si128(l2,6*2);
		t2 = _mm_slli_si128(t2,5*2);
		t1 = _mm_adds_epi16(t1,t2);
		t2 = _mm_slli_si128(l3,7*2);
		t1 = _mm_adds_epi16(t1,t2);
		t1 = _mm_mulhi_epi16(t1, gD1);
		o128 = _mm_adds_epi16(o128,t1);

		// Rotate the three gain-vector variants to keep each lane's gain
		// aligned with its plane as the 8-lane window advances by 8 shorts.
		t1 = gA1;
		gA1 = gA2;
		gA2 = gA3;
		gA3 = t1;
		t1 = gB1;
		gB1 = gB2;
		gB2 = gB3;
		gB3 = t1;
		t1 = gC1;
		gC1 = gC2;
		gC2 = gC3;
		gC3 = t1;
		t1 = gD1;
		gD1 = gD2;
		gD2 = gD3;
		gD3 = t1;

		// Slide the three-register window forward and load the next 8 shorts.
		l1 = l2;
		l2 = l3;
		l3 = _mm_loadu_si128(line128++);
		if(shift)
		{
			l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
			// Saturating add/sub clamps negatives to zero, then restore scale.
			o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
			o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
			o128 = _mm_slli_epi16(o128,4);
		}
		else
		{
			// upper limit to 32767
			o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
			o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
			o128 = _mm_slli_epi16(o128,1);
		}
		_mm_storeu_si128(outline128++, o128);
	}
}
void HistogramLine(DECODER *decoder, unsigned short *sbase, int width, int format, int whitepoint)
{
int x,val,ypos=0,upos=1,vpos=3;
int step = 1,pos=0;
short *ssbase = (short *)sbase;
uint32_t *lbase = (uint32_t *)sbase;
ToolsHandle *tools = decoder->tools;
int scaledvectorscope = 0;
if(tools == NULL)
return;
if(whitepoint == 13)
{
if(format == DECODED_FORMAT_RG64)
format = DECODED_FORMAT_W13A;
else
format = DECODED_FORMAT_WP13;
}
while(width/step > 360)
{
step*=2;
}
tools->waveformWidth = width/step;
decoder->tools->blurUVdone = 0;
switch(format & 0xffffff)
{
case DECODED_FORMAT_WP13:
decoder->tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int32_t R,G,B,U,V;
R = ssbase[0]>>5;
G = ssbase[1]>>5;
B = ssbase[2]>>5;
if(R > 255) R = 255;
if(R < 0) R = 0;
if(G > 255) G = 255;
if(G < 0) G = 0;
if(B > 255) B = 255;
if(B < 0) B = 0;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if(scaledvectorscope)
{
U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
}
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
ssbase += step*3;
}
break;
case DECODED_FORMAT_W13A:
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int32_t R,G,B,U,V;
R = ssbase[0]>>5;
G = ssbase[1]>>5;
B = ssbase[2]>>5;
if(R > 255) R = 255;
if(R < 0) R = 0;
if(G > 255) G = 255;
if(G < 0) G = 0;
if(B > 255) B = 255;
if(B < 0) B = 0;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if(scaledvectorscope)
{
U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
}
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
ssbase += step*4;
}
break;
case DECODED_FORMAT_RG48:
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int32_t R,G,B,U,V;
R = sbase[0]>>8;
G = sbase[1]>>8;
B = sbase[2]>>8;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if(scaledvectorscope)
{
U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
}
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
sbase += step*3;
}
break;
case DECODED_FORMAT_AB10:
case DECODED_FORMAT_RG30:
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int32_t R,G,B,U,V;
val = lbase[x];
R = (val>>22)&0xff;
G = (val>>12)&0xff;
B = (val>>02)&0xff;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if(scaledvectorscope)
{
U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
}
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
}
break;
case DECODED_FORMAT_AR10:
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int32_t R,G,B,U,V;
val = lbase[x];
B = (val>>22)&0xff;
G = (val>>12)&0xff;
R = (val>>02)&0xff;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if(scaledvectorscope)
{
U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
}
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
}
break;
case DECODED_FORMAT_R210:
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int32_t R,G,B,U,V;
val = SwapInt32BtoN(lbase[x]);
R = (val>>22)&0xff;
G = (val>>12)&0xff;
B = (val>>02)&0xff;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if(scaledvectorscope)
{
U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
}
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
}
break;
case DECODED_FORMAT_DPX0:
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int32_t R,G,B,U,V;
val = SwapInt32BtoN(lbase[x]);
R = (val>>24)&0xff;
G = (val>>14)&0xff;
B = (val>>04)&0xff;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if(scaledvectorscope)
{
U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
}
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
}
break;
case DECODED_FORMAT_RG64:
case DECODED_FORMAT_B64A:
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int32_t R,G,B,U,V;
R = sbase[1]>>8;
G = sbase[2]>>8;
B = sbase[3]>>8;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if(scaledvectorscope)
{
U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
}
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
sbase += step*4;
}
break;
case COLOR_FORMAT_UYVY:
ypos=1,upos=0,vpos=2;
case DECODED_FORMAT_CbYCrY_8bit: // CMD: 20100109
case COLOR_FORMAT_YUYV:
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int Y,U,V,R,G,B;
uint8_t *bptr = (uint8_t *)sbase;
bptr += x * 2;
Y = bptr[ypos]-16;
U = bptr[upos]-128;
Y+= bptr[ypos+2]-16; Y>>=1;
V = bptr[vpos]-128;
R = (9535*Y + 14688*V)>>13; //13-bit white
G = (9535*Y - 4375*V - 1745*U)>>13;
B = (9535*Y + 17326*U)>>13;
//TODO much -20 to 120 RGB range.
if(R > 255) R = 255;
if(R < 0) R = 0;
if(G > 255) G = 255;
if(G < 0) G = 0;
if(B > 255) B = 255;
if(B < 0) B = 0;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
if(scaledvectorscope)
{
U *= 255; U /= 314;
V *= 255; V /= 244;
}
//* 255.0/314.0
//* 255.0/244.0
U += 128;
V += 128;
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
}
break;
case COLOR_FORMAT_YU64:
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int Y,U,V,R,G,B;
uint8_t *bptr = (uint8_t *)sbase;
bptr += x * 4;
bptr++; //read only the high byte out of the 16-bit
Y = bptr[0]-16;
V = bptr[2]-128;
Y+= bptr[4]-16; Y>>=1;
U = bptr[6]-128;
R = (9535*Y + 14688*V)>>13; //13-bit white
G = (9535*Y - 4375*V - 1745*U)>>13;
B = (9535*Y + 17326*U)>>13;
if(R > 255) R = 255;
if(R < 0) R = 0;
if(G > 255) G = 255;
if(G < 0) G = 0;
if(B > 255) B = 255;
if(B < 0) B = 0;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
if(scaledvectorscope)
{
U *= 255; U /= 314;
V *= 255; V /= 244;
}
U += 128;
V += 128;
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
}
break;
case COLOR_FORMAT_V210:
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int Y,U,V,R,G,B;
uint32_t *lptr = (uint32_t *)sbase;
lptr += (x/6)*4;
switch(x % 6)
{
case 0:
V = ((*lptr>>02) & 0xff) - 128;
Y = ((*lptr>>12) & 0xff) - 16;
U = ((*lptr>>22) & 0xff) - 128;
lptr++;
Y+= ((*lptr>>02) & 0xff) - 16; Y>>=1;
break;
case 1:
lptr++;
Y = ((*lptr>>02) & 0xff) - 16;
V = ((*lptr>>12) & 0xff) - 128;
Y+= ((*lptr>>22) & 0xff) - 16; Y>>=1;
lptr--;
U = ((*lptr>>22) & 0xff) - 128;
break;
case 2:
lptr++;
Y = ((*lptr>>22) & 0xff) - 16;
lptr++;
U = ((*lptr>>02) & 0xff) - 128;
Y+= ((*lptr>>12) & 0xff) - 16; Y>>=1;
V = ((*lptr>>22) & 0xff) - 128;
break;
case 3:
lptr++;
V = ((*lptr>>12) & 0xff) - 128;
lptr++;
U = ((*lptr>>02) & 0xff) - 128;
Y = ((*lptr>>12) & 0xff) - 16;
lptr++;
Y+= ((*lptr>>02) & 0xff) - 16; Y>>=1;
break;
case 4:
lptr+=2;
V = ((*lptr>>22) & 0xff) - 128;
lptr++;
Y = ((*lptr>>02) & 0xff) - 16;
U = ((*lptr>>12) & 0xff) - 128;
Y+= ((*lptr>>22) & 0xff) - 16; Y>>=1;
break;
case 5:
lptr+=2;
V = ((*lptr>>22) & 0xff) - 128;
lptr++;
U = ((*lptr>>12) & 0xff) - 128;
Y = ((*lptr>>22) & 0xff) - 16;
lptr++;
Y+= ((*lptr>>02) & 0xff) - 16; Y>>=1;
break;
}
R = (9535*Y + 14688*V)>>13; //13-bit white
G = (9535*Y - 4375*V - 1745*U)>>13;
B = (9535*Y + 17326*U)>>13;
if(R > 255) R = 255;
if(R < 0) R = 0;
if(G > 255) G = 255;
if(G < 0) G = 0;
if(B > 255) B = 255;
if(B < 0) B = 0;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
if(scaledvectorscope)
{
U *= 255; U /= 314;
V *= 255; V /= 244;
}
U += 128;
V += 128;
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
}
break;
case COLOR_FORMAT_RGB24:
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int R,G,B,U,V;
uint8_t *bptr = (uint8_t *)sbase;
bptr += x * 3;
R = bptr[2];
G = bptr[1];
B = bptr[0];
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if(scaledvectorscope)
{
U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
}
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
}
break;
case COLOR_FORMAT_RGB32:
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int R,G,B,U,V;
uint8_t *bptr = (uint8_t *)sbase;
bptr += x * 4;
R = bptr[2];
G = bptr[1];
B = bptr[0];
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if(scaledvectorscope)
{
U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
}
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
}
break;
case COLOR_FORMAT_BYR2:
case COLOR_FORMAT_BYR4:
//do nothing
break;
default:
assert(0);
#if (0 && DEBUG)
fprintf(stderr,"decoder.HistogramLine: Unsupported pixel format\n");
#endif
break;
}
}
/*
 * GhostBust -- reduce stereo crosstalk ("ghosting") between the left and right
 * eye lines of a 3D pair by subtracting a fraction of each eye's image from
 * the other, symmetric in both directions.
 *
 *   decoder  - supplies the lazily filled sqrttable cache (an entry still
 *              equal to 65535 means "square root not computed yet")
 *   sbaseL   - left-eye line, 3 unsigned shorts (R,G,B) per pixel, in place
 *   sbaseR   - right-eye line, same layout, in place
 *   width    - number of pixels to process
 *   ileakL/R - leak strengths toward the left/right eye; reduced to 10-bit
 *
 * Per channel the formula (on squared, approximately linearized components) is
 *   new = old*(1-leak) + leak*white - otherEye*leak, all scaled by 1024,
 * and the square root afterwards restores the original transfer curve.
 */
void GhostBust(DECODER *decoder, unsigned short *sbaseL, unsigned short *sbaseR, int width, int ileakL, int ileakR)
{
#if 1
	int x,RL,GL,BL,RR,GR,BR;
	int nRL,nGL,nBL;
	int nRR,nGR,nBR;
	int max = 1024*1024-1;		// largest squared 10-bit value (20-bit range)
	unsigned short *sqrttable = decoder->sqrttable;
	ileakL>>=6;			// 16-bit leak amount -> 10-bit
	ileakR>>=6;
	if(sqrttable == NULL)
		return;			// no sqrt cache allocated; nothing we can do
	for(x=0;x<width;x++)
	{
		// Left eye: reduce to 10-bit, then square to approximate linear light.
		RL = sbaseL[0]>>6;
		GL = sbaseL[1]>>6; //10-bit
		BL = sbaseL[2]>>6;
		RL*=RL;
		GL*=GL; //20-bit
		BL*=BL;
		// Right eye: same linearization.
		RR = sbaseR[0]>>6;
		GR = sbaseR[1]>>6; //10-bit
		BR = sbaseR[2]>>6;
		RR*=RR;
		GR*=GR; //20-bit
		BR*=BR;
		// New left = left*(1023-leak) + leak*white - right*leak.
		// NOTE(review): the products approach 31 bits; assumes leak values are
		// small enough to avoid signed overflow -- confirm against callers.
		nRL = RL*(1023-ileakL) + ileakL*max - RR*ileakL; //30-bit
		nGL = GL*(1023-ileakL) + ileakL*max - GR*ileakL;
		nBL = BL*(1023-ileakL) + ileakL*max - BR*ileakL;
		nRL >>= 10; //20-bit
		nGL >>= 10;
		nBL >>= 10;
		// Clamp to the 20-bit working range.
		if(nRL>max) nRL=max; if(nRL<0) nRL=0;
		if(nGL>max) nGL=max; if(nGL<0) nGL=0;
		if(nBL>max) nBL=max; if(nBL<0) nBL=0;
		// Lazily fill the sqrt lookup; 65535 marks an uncomputed slot.
		if(sqrttable[nRL] == 65535)
			sqrttable[nRL] = (int)sqrt(nRL);
		if(sqrttable[nGL] == 65535)
			sqrttable[nGL] = (int)sqrt(nGL);
		if(sqrttable[nBL] == 65535)
			sqrttable[nBL] = (int)sqrt(nBL);
		// Un-square back to ~gamma space and restore 16-bit scale.
		sbaseL[0] = sqrttable[nRL]<<6;
		sbaseL[1] = sqrttable[nGL]<<6;
		sbaseL[2] = sqrttable[nBL]<<6;
		sbaseL += 3;
		// Same correction for the right eye, subtracting the (original) left.
		nRR = RR*(1023-ileakR) + ileakR*max - RL*ileakR; //30-bit
		nGR = GR*(1023-ileakR) + ileakR*max - GL*ileakR;
		nBR = BR*(1023-ileakR) + ileakR*max - BL*ileakR;
		nRR >>= 10; //20-bit
		nGR >>= 10;
		nBR >>= 10;
		if(nRR>max) nRR=max; if(nRR<0) nRR=0;
		if(nGR>max) nGR=max; if(nGR<0) nGR=0;
		if(nBR>max) nBR=max; if(nBR<0) nBR=0;
		if(sqrttable[nRR] == 65535)
			sqrttable[nRR] = (int)sqrt(nRR);
		if(sqrttable[nGR] == 65535)
			sqrttable[nGR] = (int)sqrt(nGR);
		if(sqrttable[nBR] == 65535)
			sqrttable[nBR] = (int)sqrt(nBR);
		sbaseR[0] = sqrttable[nRR]<<6;
		sbaseR[1] = sqrttable[nGR]<<6;
		sbaseR[2] = sqrttable[nBR]<<6;
		sbaseR += 3;
	}
#else // works and fast but has not image linearization, not as good
	// Disabled SSE2 path kept for reference.
	// NOTE(review): this branch references `ileak`, which is not a parameter of
	// this function -- it would not compile if re-enabled as-is.
	__m128i *ptrL = (__m128i *)sbaseL;
	__m128i *ptrR = (__m128i *)sbaseR;
	__m128i t,L,R,nL,nR;
	int x,width8 = (width*3) & ~7;
	__m128i white_epi16 = _mm_set1_epi16(32767);
	__m128i leak_epi16 = _mm_set1_epi16(ileak>>1);
	__m128i oneNegLeak_epi16 = _mm_set1_epi16(32767-(ileak>>1));
	for(x=0;x<width8;x+=8)
	{
		L = _mm_load_si128(ptrL);
		R = _mm_load_si128(ptrR);
		L = _mm_srli_epi16(L,1); //15-bit
		R = _mm_srli_epi16(R,1); //15-bit
		nL = _mm_mulhi_epi16(L, oneNegLeak_epi16);
		t = _mm_mulhi_epi16(white_epi16, leak_epi16);
		nL = _mm_adds_epi16(nL, t);
		t = _mm_mulhi_epi16(R, leak_epi16);
		nL = _mm_subs_epu16(nL, t);
		nR = _mm_mulhi_epi16(R, oneNegLeak_epi16);
		t = _mm_mulhi_epi16(white_epi16, leak_epi16);
		nR = _mm_adds_epi16(nR, t);
		t = _mm_mulhi_epi16(L, leak_epi16);
		nR = _mm_subs_epu16(nR, t);
		L = _mm_slli_epi16(nL,2);
		R = _mm_slli_epi16(nR,2);
		_mm_store_si128(ptrL++, L);
		_mm_store_si128(ptrR++, R);
	}
#endif
}
/*
 * GhostBustRC -- crosstalk (ghost) reduction for red/cyan anaglyph 3D.
 * One line of interleaved 16-bit RGB is corrected in place: the red channel
 * (left eye) has a fraction of the cyan (average of G and B) subtracted, and
 * the cyan channels (right eye) each have a fraction of red subtracted.
 *
 *   decoder - supplies the lazily built sqrttable cache (entries still equal
 *             to 65535 have not been computed yet)
 *   sbase   - line of pixels, 3 unsigned shorts (R,G,B) per pixel, in place
 *   width   - pixel count
 *   ileakL  - leak strength applied to the red channel
 *   ileakR  - leak strength applied to the green/blue channels
 *
 * Math is done on squared (approximately linearized) 10-bit components; the
 * cached square root restores the original transfer curve afterwards.
 */
void GhostBustRC(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR)
{
#if 1
	int x,R,G,B;
	int nR,nG,nB;
	int max = 1024*1024-1;		// largest squared 10-bit value (20-bit range)
	unsigned short *sqrttable = decoder->sqrttable;
	ileakL>>=6;			// 16-bit leak amount -> 10-bit
	ileakR>>=6;
	if(sqrttable == NULL)
		return;			// no sqrt cache allocated; nothing we can do
	for(x=0;x<width;x++)
	{
		// Reduce to 10-bit and square to approximate linear light.
		R = sbase[0]>>6;
		G = sbase[1]>>6; //10-bit
		B = sbase[2]>>6;
		R*=R;
		G*=G; //20-bit
		B*=B;
		// new = old*(1023-leak) + leak*white - otherEye*leak (scaled by 1024);
		// the other eye for red is the cyan average (G+B)/2, and for cyan it is R.
		nR = R*(1023-ileakL) + ileakL*max - ((G+B)>>1)*ileakL; //30-bit
		nG = G*(1023-ileakR) + ileakR*max - R*ileakR;
		nB = B*(1023-ileakR) + ileakR*max - R*ileakR;
		nR >>= 10; //20-bit
		nG >>= 10;
		nB >>= 10;
		// Clamp to the 20-bit working range.
		if(nR>max) nR=max; if(nR<0) nR=0;
		if(nG>max) nG=max; if(nG<0) nG=0;
		if(nB>max) nB=max; if(nB<0) nB=0;
		// Lazily fill the sqrt lookup; 65535 marks an uncomputed slot.
		if(sqrttable[nR] == 65535)
			sqrttable[nR] = (int)sqrt(nR);
		if(sqrttable[nG] == 65535)
			sqrttable[nG] = (int)sqrt(nG);
		if(sqrttable[nB] == 65535)
			sqrttable[nB] = (int)sqrt(nB);
		// Un-square back to ~gamma space and restore 16-bit scale.
		sbase[0] = sqrttable[nR]<<6;
		sbase[1] = sqrttable[nG]<<6;
		sbase[2] = sqrttable[nB]<<6;
		sbase += 3;
	}
#elif 0
	// Disabled floating-point reference implementation (same formula in
	// normalized 0..1 space). Kept for documentation of the intended math.
	int x;
	float R,G,B;
	float nR,nG,nB;
	float fleakL = (float)ileakL / 65535.0;
	float fleakR = (float)ileakR / 65535.0;
	for(x=0;x<width;x++)
	{
		R = sbase[0];
		G = sbase[1];
		B = sbase[2];
		R /= 65535.0;
		G /= 65535.0;
		B /= 65535.0;
		R *= R;
		G *= G;
		B *= B;
		nR = R*(1.0-fleakL) + fleakL - (G+B)*0.5*fleakL;
		nG = G*(1.0-fleakR) + fleakR - R*fleakR;
		nB = B*(1.0-fleakR) + fleakR - R*fleakR;
		if(nR<0) nR=0;
		if(nG<0) nG=0;
		if(nB<0) nB=0;
		nR = sqrt(nR);
		nG = sqrt(nG);
		nB = sqrt(nB);
		sbase[0] = nR * 65535.0;
		sbase[1] = nG * 65535.0;
		sbase[2] = nB * 65535.0;
		sbase += 3;
	}
#elif 0
	// Disabled SSE float path processing two pixels per iteration.
	__m128i RGBRGB,rgb_epi32,RGB1,RGB2;
	__m128i zero_epi128 = _mm_setzero_si128();
	int x,width6 = (width*3) / 6 * 6;
	__m128 white_ps = _mm_set1_ps(1.0);
	__m128 mul_neg_leak_ps = _mm_set_ps(1.0 - ((float)ileakL/65536.0), 1.0 - ((float)ileakR/65536.0), 1.0 - ((float)ileakR/65536.0), 1.0 - ((float)ileakL/65536.0));
	__m128 leak_ps = _mm_set_ps((float)ileakL/65536.0, (float)ileakR/65536.0, (float)ileakR/65536.0, (float)ileakL/65536.0);
	__m128 scale_ps = _mm_set1_ps(65535.0);
	__m128 scalehalf_ps = _mm_set1_ps(32767.0);
	__m128 zero_ps = _mm_set1_ps(0.0);
	__m128 rgb_ps, alt_rgb_ps;
	__m128i sub_epi32;
	__m128 sub_ps;
	for(x=0;x<width6;x+=6) // two RGB pairs
	{
		int R,G,B;
		RGBRGB = _mm_loadu_si128((__m128i *)sbase);
		R = _mm_extract_epi16(RGBRGB, 0);
		G = _mm_extract_epi16(RGBRGB, 1);
		B = _mm_extract_epi16(RGBRGB, 2);
		G+=B;
		G>>=1;
		sub_epi32 = _mm_set_epi32(G,R,R,G);
		sub_ps = _mm_cvtepi32_ps(sub_epi32); // range 0 to 65535.0
		sub_ps = _mm_div_ps(sub_ps, scale_ps); // range 0 to 1.0
		sub_ps = _mm_mul_ps(sub_ps, sub_ps); // square
		rgb_epi32 = _mm_unpacklo_epi16(RGBRGB, zero_epi128);
		rgb_ps = _mm_cvtepi32_ps(rgb_epi32); // range 0 to 65535.0
		rgb_ps = _mm_div_ps(rgb_ps, scale_ps); // range 0 to 1.0
		rgb_ps = _mm_mul_ps(rgb_ps, rgb_ps); // square
		rgb_ps = _mm_mul_ps(rgb_ps, mul_neg_leak_ps); // [R*(1.0-fleakL)] + fleakL - (G+B)*0.5*fleakL;
		rgb_ps = _mm_add_ps(rgb_ps, leak_ps); // R*(1.0-fleakL) [+ fleakL] - (G+B)*0.5*fleakL;
		sub_ps = _mm_mul_ps(sub_ps, leak_ps); // R*(1.0-fleakL) + fleakL - [(G+B)*0.5*fleakL;]
		rgb_ps = _mm_sub_ps(rgb_ps, sub_ps); // R*(1.0-fleakL) + fleakL] [- (G+B)*0.5*fleakL;]
		rgb_ps = _mm_max_ps(rgb_ps, zero_ps); // if(x < 0) x= 0;
		rgb_ps = _mm_sqrt_ps(rgb_ps); // sqrt()
		rgb_ps = _mm_mul_ps(rgb_ps, scalehalf_ps); // range 0 to 32767
		RGB1 = _mm_cvtps_epi32(rgb_ps);
		RGB1 = _mm_packs_epi32 (RGB1, zero_epi128);
		RGB1 = _mm_slli_si128(RGB1, 10);
		RGB1 = _mm_srli_si128(RGB1, 10);
		RGBRGB = _mm_srli_si128(RGBRGB, 6);
		R = _mm_extract_epi16(RGBRGB, 0);
		G = _mm_extract_epi16(RGBRGB, 1);
		B = _mm_extract_epi16(RGBRGB, 2);
		G+=B;
		G>>=1;
		sub_epi32 = _mm_set_epi32(G,R,R,G);
		sub_ps = _mm_cvtepi32_ps(sub_epi32); // range 0 to 65535.0
		sub_ps = _mm_div_ps(sub_ps, scale_ps); // range 0 to 1.0
		sub_ps = _mm_mul_ps(sub_ps, sub_ps); // square
		rgb_epi32 = _mm_unpacklo_epi16(RGBRGB, zero_epi128);
		rgb_ps = _mm_cvtepi32_ps(rgb_epi32); // range 0 to 65535.0
		rgb_ps = _mm_div_ps(rgb_ps, scale_ps); // range 0 to 1.0
		rgb_ps = _mm_mul_ps(rgb_ps, rgb_ps); // square
		rgb_ps = _mm_mul_ps(rgb_ps, mul_neg_leak_ps); // [R*(1.0-fleakL)] + fleakL - (G+B)*0.5*fleakL;
		rgb_ps = _mm_add_ps(rgb_ps, leak_ps); // R*(1.0-fleakL) [+ fleakL] - (G+B)*0.5*fleakL;
		sub_ps = _mm_mul_ps(sub_ps, leak_ps); // R*(1.0-fleakL) + fleakL - [(G+B)*0.5*fleakL;]
		rgb_ps = _mm_sub_ps(rgb_ps, sub_ps); // R*(1.0-fleakL) + fleakL] [- (G+B)*0.5*fleakL;]
		rgb_ps = _mm_max_ps(rgb_ps, zero_ps); // if(x < 0) x= 0;
		rgb_ps = _mm_sqrt_ps(rgb_ps); // sqrt()
		rgb_ps = _mm_mul_ps(rgb_ps, scalehalf_ps); // range 0 to 32767
		RGB2 = _mm_cvtps_epi32(rgb_ps);
		RGB2 = _mm_packs_epi32 (RGB2, zero_epi128);
		RGB2 = _mm_slli_si128(RGB2, 6);
		RGB1 = _mm_adds_epi16(RGB1, RGB2);
		RGB1 = _mm_slli_epi16(RGB1, 1);
		RGB1 = _mm_slli_si128(RGB1, 4);
		RGB1 = _mm_srli_si128(RGB1, 4);
		RGBRGB = _mm_srli_si128(RGBRGB, 6);
		RGBRGB = _mm_slli_si128(RGBRGB, 12);
		RGBRGB = _mm_adds_epi16(RGB1, RGBRGB);
		_mm_storeu_si128((__m128i *)sbase, RGBRGB);
		sbase += 6;
	}
#endif
}
/*
 * Clamp a corrected squared component to the 20-bit working range and
 * un-square it through the lazily built lookup table (a slot still holding
 * 65535 has not been computed yet).
 */
static int GhostBustABResolve(unsigned short *table, int value, int limit)
{
	if(value > limit) value = limit;
	if(value < 0) value = 0;
	if(table[value] == 65535)
		table[value] = (int)sqrt(value);
	return table[value];
}

/*
 * GhostBustAB -- crosstalk (ghost) reduction for amber/blue anaglyph 3D.
 * One line of interleaved 16-bit RGB is corrected in place: the amber eye
 * (R and G) has a fraction of the blue channel subtracted, while the blue
 * eye has a fraction of the amber average (R+G)/2 subtracted.
 *
 *   decoder - supplies the lazily built square-root lookup table
 *   sbase   - line of pixels, 3 unsigned shorts (R,G,B) per pixel, in place
 *   width   - pixel count
 *   ileakL  - leak strength applied to the amber (R,G) channels
 *   ileakR  - leak strength applied to the blue channel
 *
 * The math runs on squared (approximately linearized) 10-bit components:
 * new = old*(1-leak) + leak*white - otherEye*leak, all scaled by 1024.
 */
void GhostBustAB(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR)
{
	const int limit = 1024*1024-1;	/* largest squared 10-bit value */
	unsigned short *table = decoder->sqrttable;
	int col, amberLeak, blueLeak;
	int rs, gs, bs, newR, newG, newB;

	amberLeak = ileakL >> 6;	/* 16-bit leak amount -> 10-bit */
	blueLeak = ileakR >> 6;
	if(table == NULL)
		return;			/* no sqrt cache allocated */

	for(col = 0; col < width; col++, sbase += 3)
	{
		/* squared 10-bit components, approximately linear light */
		rs = sbase[0] >> 6;
		gs = sbase[1] >> 6;
		bs = sbase[2] >> 6;
		rs *= rs;
		gs *= gs;
		bs *= bs;

		/* subtract the opposing eye's leakage, scaled by 1024 */
		newR = (rs*(1023-amberLeak) + amberLeak*limit - bs*amberLeak) >> 10;
		newG = (gs*(1023-amberLeak) + amberLeak*limit - bs*amberLeak) >> 10;
		newB = (bs*(1023-blueLeak) + blueLeak*limit - ((rs+gs)>>1)*blueLeak) >> 10;

		/* un-square back to ~gamma space and restore 16-bit scale */
		sbase[0] = GhostBustABResolve(table, newR, limit) << 6;
		sbase[1] = GhostBustABResolve(table, newG, limit) << 6;
		sbase[2] = GhostBustABResolve(table, newB, limit) << 6;
	}
}
/*
 * GhostBustGM -- crosstalk (ghost) reduction for green/magenta anaglyph 3D.
 * One line of interleaved 16-bit RGB is corrected in place: the magenta eye
 * (R and B) has a fraction of the green channel subtracted, while the green
 * eye has a fraction of the magenta average (R+B)/2 subtracted.
 *
 *   decoder - supplies the lazily built square-root lookup table (entries
 *             still equal to 65535 have not been computed yet)
 *   sbase   - line of pixels, 3 unsigned shorts (R,G,B) per pixel, in place
 *   width   - pixel count
 *   ileakL  - leak strength applied to the magenta (R,B) channels
 *   ileakR  - leak strength applied to the green channel
 *
 * The math runs on squared (approximately linearized) 10-bit components:
 * new = old*(1-leak) + leak*white - otherEye*leak, all scaled by 1024; the
 * cached square root restores the original transfer curve afterwards.
 */
void GhostBustGM(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR)
{
	unsigned short *roots = decoder->sqrttable;
	const int top = 1024*1024-1;	/* largest squared 10-bit value */
	int magentaLeak = ileakL >> 6;	/* 16-bit leak amount -> 10-bit */
	int greenLeak = ileakR >> 6;
	int i = 0;

	if(roots == NULL)
		return;			/* no sqrt cache allocated */

	while(i < width)
	{
		int rq, gq, bq, vR, vG, vB;

		/* squared 10-bit components, approximately linear light */
		rq = sbase[0] >> 6;
		gq = sbase[1] >> 6;
		bq = sbase[2] >> 6;
		rq *= rq;
		gq *= gq;
		bq *= bq;

		/* subtract the opposing eye's leakage, scaled by 1024 */
		vR = rq*(1023-magentaLeak) + magentaLeak*top - gq*magentaLeak;
		vG = gq*(1023-greenLeak) + greenLeak*top - ((rq+bq)>>1)*greenLeak;
		vB = bq*(1023-magentaLeak) + magentaLeak*top - gq*magentaLeak;
		vR >>= 10;
		vG >>= 10;
		vB >>= 10;

		/* clamp to the 20-bit working range */
		if(vR > top) vR = top; if(vR < 0) vR = 0;
		if(vG > top) vG = top; if(vG < 0) vG = 0;
		if(vB > top) vB = top; if(vB < 0) vB = 0;

		/* lazily fill the sqrt lookup; 65535 marks an uncomputed slot */
		if(roots[vR] == 65535)
			roots[vR] = (int)sqrt(vR);
		if(roots[vG] == 65535)
			roots[vG] = (int)sqrt(vG);
		if(roots[vB] == 65535)
			roots[vB] = (int)sqrt(vB);

		/* un-square back to ~gamma space and restore 16-bit scale */
		sbase[0] = roots[vR] << 6;
		sbase[1] = roots[vG] << 6;
		sbase[2] = roots[vB] << 6;
		sbase += 3;
		i++;
	}
}
void ProcessLine3D(DECODER *decoder, uint8_t *buffer, int bufferremain, uint8_t *output, int pitch, uint8_t *source_buffer, int source_pitch, int channel_offset, int y, int blank)
{
uint16_t *scratchline,*scratchline2,*scratchline3;
uint16_t *sptr;
uint16_t *srclineA,*srclineB;
uint16_t *dstlineA,*dstlineB;
int x,y2;
int width = decoder->frame.width;
int height = decoder->frame.height;
int skip = 3;
int sskip = 3;
uint8_t *bptr1;
uint8_t *bptr2;
uint8_t *baseptr1;
uint8_t *baseptr2;
float windowMaskL = decoder->cfhddata.channel[0].FloatingWindowMaskL;
float windowMaskR = decoder->cfhddata.channel[0].FloatingWindowMaskR;
float frameTilt = decoder->cfhddata.channel[0].FrameTilt;
float horizOffset = decoder->cfhddata.channel[1].HorizontalOffset;
float horizOffsetR = decoder->cfhddata.channel[2].HorizontalOffset;
float rotOffset = decoder->cfhddata.channel[1].RotationOffset;
float rotOffsetR = decoder->cfhddata.channel[2].RotationOffset;
float horizOffsetStep = 0;
float horizOffsetStepR = 0;
int flip1=0,flip2=0;
int channel_flip = decoder->cfhddata.channel_flip;
int source_pitch1 = source_pitch;
int source_pitch2 = source_pitch;
uint8_t *outputline = output+y*pitch;
uint8_t *outputline2 = NULL;
float horizOffsetBase;
float rotOffsetBase;
float horizOffsetBaseR;
float rotOffsetBaseR;
int formatdone = 0;
float xmin = decoder->cfhddata.channel[0].FrameMask.topLftX;
float xmax = decoder->cfhddata.channel[0].FrameMask.topRgtX;
//float ymin = decoder->cfhddata.channel[0].FrameMask.topLftY;
float ymax = decoder->cfhddata.channel[0].FrameMask.botLftY;
float zoom;
float zoomR;
float frameZoom1 = decoder->cfhddata.channel[1].FrameZoom;
float frameZoom2 = decoder->cfhddata.channel[2].FrameZoom;
float frameAutoZoom = decoder->cfhddata.channel[0].FrameAutoZoom;
float frameDiffZoom1 = decoder->cfhddata.channel[1].FrameDiffZoom;
float frameDiffZoom2 = decoder->cfhddata.channel[2].FrameDiffZoom;
float frameHDynamic = decoder->cfhddata.FrameHDynamic;
float frameHDynCenter = decoder->cfhddata.FrameHDynCenter;
float frameHDynWidth = decoder->cfhddata.FrameHDynWidth;
float frameHScale = decoder->cfhddata.FrameHScale;
int alphachannel = 0;
int whitepoint = 16;
float blursharpenL = decoder->cfhddata.channel[1].user_blur_sharpen;
float blursharpenR = decoder->cfhddata.channel[2].user_blur_sharpen;
float vignette = decoder->cfhddata.channel[0].user_vignette_start;
int flip_LR = 0;
float vig_r1;
float vig_r2;
float vig_gain;
if(blank) // blankline, no shifts required
{
windowMaskL = 0;
windowMaskR = 0;
frameTilt = 0;
horizOffset = 0;
horizOffsetR = 0;
rotOffset = 0;
rotOffsetR = 0;
frameZoom1 = 1.0;
frameZoom2 = 1.0;
frameAutoZoom = 1.0;
frameDiffZoom1 = 1.0;
frameDiffZoom2 = 1.0;
frameHScale = 1.0;
frameHDynamic = 1.0;
frameHDynCenter = 0.5;
frameHDynWidth = 0.0;
}
if( decoder->StereoBufferFormat == DECODED_FORMAT_RG64 ||
decoder->StereoBufferFormat == DECODED_FORMAT_W13A ||
decoder->StereoBufferFormat == DECODED_FORMAT_RGB32)
alphachannel = 1;
if(xmax == 0.0) xmax = 1.0;
if(ymax == 0.0) ymax = 1.0;
if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
width *= 2;
}
if(decoder->source_channels < 2) // 2D
{
channel_flip &= 0x3;
channel_flip |= channel_flip<<2;
decoder->cfhddata.channel_flip = channel_flip;
}
if(!(decoder->cfhddata.process_path_flags & PROCESSING_COLORMATRIX) ||
decoder->frame.resolution == DECODED_RESOLUTION_QUARTER ||
decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY ||
decoder->frame.resolution == DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED)
{
blursharpenL = 0.0;
blursharpenR = 0.0;
}
if(!(decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION))
{
horizOffset = rotOffset = 0;
horizOffsetR = rotOffsetR = 0;
frameTilt = 0;
frameAutoZoom = 1.0;
frameDiffZoom1 = 1.0;
frameDiffZoom2 = 1.0;
}
if(!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS))
{
channel_flip = 0;
}
if(decoder->cfhddata.process_path_flags & PROCESSING_FRAMING)
{
horizOffset += decoder->cfhddata.FrameOffsetX;
horizOffsetR -= decoder->cfhddata.FrameOffsetX;
frameZoom1 += frameHScale - 1.0f;
frameZoom2 += frameHScale - 1.0f;
if(frameHDynamic != 1.0)
{
frameZoom1 += 0.00001f;
frameZoom2 += 0.00001f;
}
if(vignette != 0.0)
{
float vig_diag = sqrtf(1.0f + ((float)decoder->frame.height / (float) decoder->frame.width) * ((float)decoder->frame.height / (float) decoder->frame.width));
vig_r1 = (vignette+1.0f);
vig_r2 = (decoder->cfhddata.channel[0].user_vignette_end+1.0f);
vig_gain = decoder->cfhddata.channel[0].user_vignette_gain;
vig_r1 *= vig_diag;
vig_r2 *= vig_diag;
}
}
else
{
frameZoom1 = 1.0f;
frameZoom2 = 1.0f;
vignette = 0;
}
zoom = frameZoom1 * frameAutoZoom * frameDiffZoom1;
if(frameDiffZoom2 != 0.0)
zoomR = frameZoom2 * frameAutoZoom / frameDiffZoom2;
else
zoomR = 0.0;
if(decoder->cfhddata.process_path_flags & PROCESSING_FRAMING)
{
if(decoder->cfhddata.InvertOffset)
{
rotOffset = -rotOffset;
rotOffsetR = -rotOffsetR;
rotOffset -= decoder->cfhddata.FrameOffsetR;
rotOffsetR -= -decoder->cfhddata.FrameOffsetR;
}
else
{
rotOffset += decoder->cfhddata.FrameOffsetR;
rotOffsetR += -decoder->cfhddata.FrameOffsetR;
}
}
rotOffsetBase = rotOffset;
horizOffsetBase = horizOffset;
rotOffsetBaseR = rotOffsetR;
horizOffsetBaseR = horizOffsetR;
horizOffset -= rotOffset * 0.5f;
horizOffsetStep = rotOffset / (float)height;
horizOffsetR -= rotOffsetR * 0.5f;
horizOffsetStepR = rotOffsetR / (float)height;
horizOffset += horizOffsetStep * y;
horizOffsetR += horizOffsetStepR * y;
assert(bufferremain >= width * 8 * 2 * 2);
baseptr1 = source_buffer;
baseptr2 = source_buffer + channel_offset;
if(channel_flip & 0xf)
{
if(channel_flip & 1)
{
flip1 = 1;
}
if(channel_flip & 4)
{
flip2 = 1;
}
}
if(source_pitch1 < 0)
flip_LR = 1;
decoder->sharpen_flip = 0;
if(channel_flip & 2) //ProcessLine3D
{
if(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1) // right channel only (stored in baseptr1)
{
}
else
{
baseptr1 += source_pitch1*(height-1);
source_pitch1 = -source_pitch1;
decoder->sharpen_flip = 1;
}
}
if(channel_flip & 8)
{
if(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1) // right channel only (stored in baseptr1)
{
baseptr1 += source_pitch1*(height-1);
source_pitch1 = -source_pitch1;
decoder->sharpen_flip = 1;
}
else
{
baseptr2 += source_pitch2*(height-1);
source_pitch2 = -source_pitch2;
}
}
bptr1 = baseptr1 + y*source_pitch1;
bptr2 = baseptr2 + y*source_pitch2;
y2 = y;
if(decoder->channel_blend_type == BLEND_FREEVIEW) //FreeView
{
if(y2 < height/4)
{
blank = 1;
y2 = 0;
}
else
{
y2 -= height/4;
y2 *= 2;
if(y2 >= height-1)
{
blank = 1;
y2 = height - 2;
}
}
bptr1 = baseptr1 + y2*source_pitch1;
bptr2 = baseptr2 + y2*source_pitch2;
}
srclineA = (uint16_t *)bptr1;
srclineB = (uint16_t *)bptr2;
scratchline = (uint16_t *)buffer;
scratchline2 = (uint16_t *)(buffer + width * 6 + width) /* as we pad the line */ ;;
scratchline3 = (uint16_t *)(buffer + width * 6*2 + width*2) /* as we pad the line */ ;
if(alphachannel)
{
scratchline = (uint16_t *)buffer;
scratchline2 = (uint16_t *)(buffer + width * 8 + width) /* as we pad the line */ ;;
scratchline3 = (uint16_t *)(buffer + width * 8*2 + width*2) /* as we pad the line */ ;
}
dstlineA = sptr = scratchline;
dstlineB = scratchline3;
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RG64:
whitepoint = 16;
skip = 8;
sskip = 4;
break;
case DECODED_FORMAT_W13A:
whitepoint = 13;
skip = 8;
sskip = 4;
break;
case DECODED_FORMAT_WP13:
whitepoint = 13;
skip = 6;
sskip = 3;
break;
case DECODED_FORMAT_RG48:
skip = 6;
sskip = 3;
break;
case DECODED_FORMAT_RGB32:
skip = 4;
break;
case DECODED_FORMAT_RGB24:
skip = 3;
break;
case DECODED_FORMAT_YUYV:
skip = 2;
break;
}
if(blank)
{
if(srclineA)
memset(srclineA, 0, width*skip);
if(srclineB && decoder->channel_decodes > 1)
memset(srclineB, 0, width*skip);
}
if(blursharpenL != 0.0 || blursharpenR != 0.0)
{
if(decoder->channel_blend_type == BLEND_FREEVIEW ||
decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC ||
decoder->channel_blend_type == BLEND_LINE_INTERLEAVED
)
{
decoder->doVerticalFilter = 0;
}
else
{
decoder->doVerticalFilter = 1;
}
}
{
switch(decoder->channel_blend_type)
{
case BLEND_FREEVIEW:
case BLEND_SIDEBYSIDE_ANAMORPHIC: //side by side
if(!blank)
{
if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL || decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
{
dstlineA = srclineA;
sptr = dstlineA;
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(!alphachannel)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline2, width/2, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline2, width/2, horizOffsetR, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width/2, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width/2, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, srclineA, scratchline2, width/2, -horizOffset, flip1);
RGBA64HoriShift(decoder, srclineB, scratchline2, width/2, horizOffsetR, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width/2, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width/2, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if(vignette != 0.0)
{
int cwidth= width/2;
if(decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC)
cwidth= width;
FastVignetteInplaceWP13(decoder, width/2, cwidth, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width/2, cwidth, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineB, decoder->frame.resolution, skip);
}
if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width/2, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width/2, (int16_t *)srclineB, blursharpenR, decoder->frame.resolution, skip);
memcpy(dstlineA+sskip*(width/2), srclineB, width/2*sskip*2);
}
else
{
int16_t *ptr;
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(!alphachannel)
{
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
else
{
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if(vignette != 0.0)
{
int cwidth= width/2;
if(decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC)
cwidth= width;
FastVignetteInplaceWP13(decoder, width, cwidth, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width, cwidth, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineB, decoder->frame.resolution, skip);
}
if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineB, blursharpenR, decoder->frame.resolution, skip);
dstlineA = srclineA;
ptr = (int16_t *)srclineA;
for(x=0; x<width/2; x++)
{
*ptr++ = (ptr1[0]+ptr1[3])>>1;
*ptr++ = (ptr1[1]+ptr1[4])>>1;
*ptr++ = (ptr1[2]+ptr1[5])>>1 ;
ptr1+=sskip*2;
}
for(; x<width; x++)
{
*ptr++ = (ptr2[0]+ptr2[3])>>1;
*ptr++ = (ptr2[1]+ptr2[4])>>1;
*ptr++ = (ptr2[2]+ptr2[5])>>1;
ptr2+=sskip*2;
}
}
if(windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, dstlineA, width/2, 0, mask);
if(windowMaskL < 0)
RGB48WindowMask(decoder, dstlineA, width/2, 0, windowMaskL);
if(xmin)
{
RGB48WindowMask(decoder, dstlineA, width/2, 1, xmin);
}
}
if(windowMaskR || (1.0-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
RGB48WindowMask(decoder, dstlineA+width*sskip/2, width/2, 1, mask);
if(windowMaskR < 0)
RGB48WindowMask(decoder, dstlineA+width*sskip/2, width/2, 1, windowMaskR);
if(xmin)
{
RGB48WindowMask(decoder, dstlineA+width*sskip/2, width/2, 0, xmin);
}
}
if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if(decoder->ghost_bust_left || decoder->ghost_bust_right)
{
GhostBust(decoder, dstlineA, dstlineA+width*sskip/2, width/2, decoder->ghost_bust_left, decoder->ghost_bust_right);
}
}
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
memcpy(scratchline2+width*sskip/2, dstlineA, width*sskip*2/2);
memcpy(dstlineA, dstlineA+width*sskip/2, width*sskip*2/2);
memcpy(dstlineA+width*sskip/2, scratchline2+width*sskip/2, width*sskip*2/2);
}
}
break;
case BLEND_STACKED_ANAMORPHIC: //stacked
case BLEND_LINE_INTERLEAVED: //fields
if((y & 1) == 1) return;
if(!blank)
{
uint16_t *ptrA1 = (uint16_t *)srclineA;
uint16_t *ptrA2 = (uint16_t *)srclineA + (source_pitch1>>1);
uint16_t *ptrB1 = (uint16_t *)srclineB;
uint16_t *ptrB2 = (uint16_t *)srclineB + (source_pitch2>>1);
FastBlendWP13((short *)ptrA1, (short *)ptrA2, (short *)ptrA1/*output*/, width*skip);
FastBlendWP13((short *)ptrB1, (short *)ptrB2, (short *)ptrB1/*output*/, width*skip);
if(zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt)
{
if(!alphachannel)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if(vignette != 0.0)
{
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineB, decoder->frame.resolution, skip);
}
if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip);
if(windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, srclineA, width, 0, mask);
if(windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
if(xmin)
{
RGB48WindowMask(decoder, srclineA, width, 1, xmin);
}
}
if(windowMaskR || (1.0-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
RGB48WindowMask(decoder, srclineB, width, 1, mask);
if(windowMaskR < 0)
RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR);
if(xmin)
{
RGB48WindowMask(decoder, srclineB, width, 0, xmin);
}
}
if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if(decoder->ghost_bust_left || decoder->ghost_bust_right)
{
GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right);
}
}
if(decoder->doVerticalFilter == 0)
{
if(decoder->channel_blend_type==BLEND_STACKED_ANAMORPHIC) //stacked
{
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
outputline2 = output+(y>>1)*pitch;
outputline = output+((y>>1)+(height/2))*pitch;
}
else
{
outputline = output+(y>>1)*pitch;
outputline2 = output+((y>>1)+(height/2))*pitch;
}
}
else //fields
{
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
outputline = output+(y)*pitch;
outputline2 = output+(y+1)*pitch;
}
else
{
outputline2 = output+(y)*pitch;
outputline = output+(y+1)*pitch;
}
}
if(flip_LR/*source_pitch1 < 0*/) // flip Left and Right
{
uint8_t *tmp = outputline2;
outputline2 = outputline;
outputline = tmp;
}
}
else
{
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
memcpy(scratchline2, srclineA, width*skip);
memcpy(srclineA, srclineB, width*skip);
memcpy(srclineB, scratchline2, width*skip);
}
}
}
break;
case BLEND_ONION: //onion
case BLEND_DIFFERENCE: //difference
case BLEND_SPLITVIEW: //splitView
if(!blank)
{
//dstlineA = source_buffer;
//dstlineA += (source_pitch>>1) * y;
sptr = dstlineA = srclineA;
srclineA = (uint16_t *)bptr1;
srclineB = (uint16_t *)bptr2;
if(zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt)
{
if(!alphachannel)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if(vignette != 0.0)
{
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineB, decoder->frame.resolution, skip);
}
if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip);
if(windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, srclineA, width, 0, mask);
if(windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
if(xmin)
{
RGB48WindowMask(decoder, srclineA, width, 1, xmin);
}
}
if(windowMaskR || (1.0-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
RGB48WindowMask(decoder, srclineB, width, 1, mask);
if(windowMaskR < 0)
RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR);
if(xmin)
{
RGB48WindowMask(decoder, srclineB, width, 0, xmin);
}
}
x = 0;
if(decoder->channel_blend_type == BLEND_SPLITVIEW) //split view
{
int xsplit = width * (decoder->cfhddata.split_pos_xy & 0xff) / 255;
for(x = xsplit*sskip; x<width*sskip; x++)
{
srclineA[x] = srclineB[x];
}
}
else if(decoder->channel_blend_type == BLEND_ONION) //onion
{
FastBlendWP13((short *)srclineA, (short *)srclineB, (short *)dstlineA/*output*/, width*skip);
}
else if(decoder->channel_blend_type == BLEND_DIFFERENCE) //difference
{
#if XMMOPT
int width8 = (width*sskip) & 0xfff8;
__m128i mid_epi16;
//int unaligned = ((int)sbase) & 15;
//unaligned += ((int)in_rgb8) & 15;
if(whitepoint == 13)
mid_epi16 = _mm_set1_epi16(0x0fff);
else
mid_epi16 = _mm_set1_epi16(0x1fff);
for(x=0; x<width8; x+=8)
{
__m128i rgb16A = _mm_load_si128((__m128i *)&srclineA[x]);
__m128i rgb16B = _mm_load_si128((__m128i *)&srclineB[x]);
// 0 to 0xffff
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
rgb16A = _mm_subs_epi16(rgb16B, rgb16A); // -3fff to 3fff
}
else
{
rgb16A = _mm_subs_epi16(rgb16A, rgb16B);
}
rgb16A = _mm_adds_epi16(rgb16A, mid_epi16); // -0x1fff to 0x5fff , avg 0x1fff
_mm_store_si128((__m128i *)&dstlineA[x], rgb16A);
}
#endif
for(; x<width*sskip; x++)
{
int val;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
val = (srclineB[x] - srclineA[x]) + 32768;
}
else
{
val = (srclineA[x] - srclineB[x]) + 32768;
}
if(val > 0x7fff) val = 0x7fff;
if(val < 0) val = 0;
dstlineA[x] = val;
}
}
}
break;
case BLEND_ANAGLYPH_RC:
case BLEND_ANAGLYPH_RC_BW:
case BLEND_ANAGLYPH_AB:
case BLEND_ANAGLYPH_AB_BW:
case BLEND_ANAGLYPH_GM:
case BLEND_ANAGLYPH_GM_BW:
case BLEND_ANAGLYPH_DUBOIS: //Optimized
{
uint16_t *sptr1 = scratchline2;
uint16_t *sptr2 = scratchline3;
dstlineA = (uint16_t *)bptr1;
// dstlineA += (source_pitch>>1) * y;
sptr = dstlineA;
sptr1 = srclineA = (uint16_t *)bptr1;
sptr2 = srclineB = (uint16_t *)bptr2;
if(zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt)
{
if(!alphachannel)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline, width, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline, width, horizOffsetR, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, scratchline2, scratchline, width, -horizOffset, flip1);
RGBA64HoriShift(decoder, scratchline3, scratchline, width, horizOffsetR, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, scratchline2, scratchline, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, scratchline3, scratchline, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if(vignette != 0.0)
{
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineB, decoder->frame.resolution, skip);
}
if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip);
if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if(decoder->ghost_bust_left || decoder->ghost_bust_right)
{
GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right);
}
}
if(windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, srclineA, width, 0, mask);
if(windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
if(xmin)
{
RGB48WindowMask(decoder, srclineA, width, 1, xmin);
}
}
if(windowMaskR || (1.0-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
RGB48WindowMask(decoder, srclineB, width, 1, mask);
if(windowMaskR < 0)
RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR);
if(xmin)
{
RGB48WindowMask(decoder, srclineB, width, 0, xmin);
}
}
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
uint16_t *tmp = srclineA;
srclineA = srclineB;
srclineB = tmp;
}
switch(decoder->channel_blend_type)
{
case BLEND_ANAGLYPH_RC:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
sptr[0] = ptr2[0];
sptr[1] = ptr1[1];
sptr[2] = ptr1[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
sptr[0] = ptr1[0];
sptr[1] = ptr2[1];
sptr[2] = ptr2[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_RC_BW:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y2;
sptr[1] = y1;
sptr[2] = y1;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y1;
sptr[1] = y2;
sptr[2] = y2;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_AB:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
sptr[0] = ptr2[0];
sptr[1] = ptr2[1];
sptr[2] = ptr1[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
sptr[0] = ptr1[0];
sptr[1] = ptr1[1];
sptr[2] = ptr2[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_AB_BW:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y2;
sptr[1] = y2;
sptr[2] = y1;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y1;
sptr[1] = y1;
sptr[2] = y2;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_GM:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
sptr[0] = ptr1[0];
sptr[1] = ptr2[1];
sptr[2] = ptr1[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
sptr[0] = ptr2[0];
sptr[1] = ptr1[1];
sptr[2] = ptr2[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_GM_BW:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y1;
sptr[1] = y2;
sptr[2] = y1;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y2;
sptr[1] = y1;
sptr[2] = y2;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_DUBOIS: //Optimized
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
int r,g,b;
for(x=0; x<width; x++)
{
r =(ptr1[0]*456 + ptr1[1]*500 + ptr1[2]*176 + ptr2[0]*-43 + ptr2[1]*-88 + ptr2[2]*-2 ) / 1000;
g =(ptr1[0]*-40 + ptr1[1]*-38 + ptr1[2]*-16 + ptr2[0]*378 + ptr2[1]*734 + ptr2[2]*-18 ) / 1000;
b =(ptr1[0]*-15 + ptr1[1]*-21 + ptr1[2]*-5 + ptr2[0]*-72 + ptr2[1]*-113+ ptr2[2]*1226) / 1000;
if(r<0) r=0; if(r>0x3fff) r=0x3fff;
if(g<0) g=0; if(g>0x3fff) g=0x3fff;
if(b<0) b=0; if(b>0x3fff) b=0x3fff;
sptr[0] = r;
sptr[1] = g;
sptr[2] = b;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
break;
}
}
break;
case BLEND_NONE:
default:
if(decoder->channel_decodes == 1) // only one channel
{
if(skip == 8)
{
//the data is already in the correct format
sptr = (unsigned short *)bptr1;
// shift if needed.
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(decoder->channel_current == 0)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGBA64HoriShift(decoder, sptr, scratchline2, width, -horizOffset, flip1);
else
RGBA64HoriShiftZoom(decoder, sptr, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGBA64HoriShift(decoder, sptr, scratchline2, width, horizOffsetR, flip2);
else
RGBA64HoriShiftZoom(decoder, sptr, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
else if(skip == 6)
{
//the data is already in the correct format
dstlineA = sptr = (unsigned short *)srclineA;
// shift if needed.
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(decoder->channel_current == 0)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
else
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGB48HoriShift(decoder, srclineA, scratchline2, width, horizOffsetR, flip2);
else
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
if(vignette != 0.0)
{
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineA, decoder->frame.resolution, skip);
}
if(decoder->channel_current == 0)
{
if(blursharpenL != 0.0)
{
FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip);
}
}
else
{
if(blursharpenR != 0.0)
{
FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenR, decoder->frame.resolution, skip);
}
}
}
if ((windowMaskL && decoder->channel_current == 0) || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
if(decoder->channel_current != 0) mask = xmin;
if(windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
RGB48WindowMask(decoder, srclineA, width, 0, mask);
}
if ((windowMaskR && decoder->channel_current == 1) || (1.0f-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
if(decoder->channel_current != 1) mask = (1.0f-xmax);
if(windowMaskR < 0)
RGB48WindowMask(decoder, srclineA, width, 1, windowMaskR);
RGB48WindowMask(decoder, srclineA, width, 1, mask);
}
}
else
{
outputline2 = output+(y+height)*pitch;
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
else
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2);
else
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
if(windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, srclineA, width, 0, mask);
if(windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
}
if(windowMaskR || (1.0-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
RGB48WindowMask(decoder, srclineB, width, 1, mask);
if(windowMaskR < 0)
RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR);
}
if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if(decoder->ghost_bust_left || decoder->ghost_bust_right)
{
GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right);
}
}
}
break;
}
}
if(!formatdone)
{
int flags = ACTIVEMETADATA_PRESATURATED;
int whitebitdepth = 16;
if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A)
{
flags = 0;
whitebitdepth = 13;
}
if(outputline2)
{
// if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools)
// HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG48, whitebitdepth);
if(decoder->doVerticalFilter == 0) // No sharp stage so output now
{
if(alphachannel)
Convert4444LinesToOutput(decoder, width, 1, y, srclineA,
outputline, pitch, decoder->frame.format, whitebitdepth, flags);
else
ConvertLinesToOutput(decoder, width, 1, y, srclineA,
outputline, pitch, decoder->frame.format, whitebitdepth, flags);
//if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools)
// HistogramLine(decoder, dstlineA, width, DECODED_FORMAT_RG48, whitebitdepth);
if(alphachannel)
Convert4444LinesToOutput(decoder, width, 1, y, srclineB,
outputline2, pitch, decoder->frame.format, whitebitdepth, flags);
else
ConvertLinesToOutput(decoder, width, 1, y, srclineB,
outputline2, pitch, decoder->frame.format, whitebitdepth, flags);
}
}
else
{
//if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools)
//{
// if(alphachannel)
// HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG64, whitebitdepth);
// else
// HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG48, whitebitdepth);
//}
if(decoder->doVerticalFilter == 0) // No sharp stage so output now
{
if(alphachannel)
Convert4444LinesToOutput(decoder, width, 1, y, srclineA,
outputline, pitch, decoder->frame.format, whitebitdepth, flags);
else
ConvertLinesToOutput(decoder, width, 1, y, srclineA,
outputline, pitch, decoder->frame.format, whitebitdepth, flags);
}
}
}
}
// SharpenLine -- apply the optional vertical blur/sharpen pass to one scan
// line held in the intermediate stereo buffer, then convert it to the final
// output pixel format.
//
// The vertical filter only runs when the intermediate buffer is WP13/W13A
// (13-bit white point); for those formats five neighbouring rows (A..E taps)
// are fed to FastSharpeningBlurVW13A/VWP13. All other formats fall straight
// through to the output conversion.
//
// Parameters:
//   decoder        - decoder state (frame geometry, metadata, blend mode)
//   buffer         - shared scratch memory; each worker thread uses its own
//                    width*skip byte slice selected by thread_index
//   bufferremain   - remaining scratch size (not referenced in this function)
//   output, pitch  - destination frame base pointer and row stride in bytes
//   local_output, local_pitch - intermediate stereo buffer and its row stride
//   channel_offset - byte offset to the second channel's pixels within a row
//                    (used by the line-interleaved blend mode)
//   y              - index of the scan line to produce
//   thread_index   - worker thread index (picks the private scratch line)
void SharpenLine(DECODER *decoder, uint8_t *buffer, int bufferremain, uint8_t *output, int pitch, uint8_t *local_output, int local_pitch, int channel_offset, int y, int thread_index)
{
	uint16_t *sbase;//*sbase2 = NULL;
	int width = decoder->frame.width;
	int height = decoder->frame.height;
	// Bytes per pixel of the intermediate buffer; reassigned from
	// decoder->StereoBufferFormat in the switch below.
	int skip = 3;
	//int flip1=0;//flip2=0;
	// NOTE(review): channel_flip is read from metadata and cleared below, but
	// is not referenced afterwards in this function.
	int channel_flip = decoder->cfhddata.channel_flip;
	//int local_pitch1 = local_pitch;
	//int local_pitch2 = local_pitch;
	uint8_t *outputline = output+y*pitch;
	//uint8_t *outputline2 = NULL;
	short *scratch;
	//int formatdone = 0;
	//float xmin = decoder->cfhddata.channel[0].FrameMask.topLftX;
	//float xmax = decoder->cfhddata.channel[0].FrameMask.topRgtX;
	//float ymin = decoder->cfhddata.channel[0].FrameMask.topLftY;
	//float ymax = decoder->cfhddata.channel[0].FrameMask.botLftY;
	int alphachannel = 0;
	float blursharpen = 0;
	int line_max = decoder->frame.height;
	int yy = y;

	// Per-eye vertical sharpen strength: channel[1] drives the left eye,
	// channel[2] the right eye.
	if(decoder->channel_current == 0)
		blursharpen = decoder->cfhddata.channel[1].user_blur_sharpen; // TODO LEFT and RIGHT separate vertical sharpen
	else
		blursharpen = decoder->cfhddata.channel[2].user_blur_sharpen; // TODO LEFT and RIGHT separate vertical sharpen

	// Sharpening is disabled when the color-matrix stage is bypassed or at
	// reduced decode resolutions.
	if(!(decoder->cfhddata.process_path_flags & PROCESSING_COLORMATRIX)||
		decoder->frame.resolution == DECODED_RESOLUTION_QUARTER ||
		decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY ||
		decoder->frame.resolution == DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED)
	{
		blursharpen = 0.0;
	}

	if(decoder->channel_mix_half_res == 1)
		line_max *= 2;

	if(!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS))
	{
		channel_flip = 0;
	}

	// Vertical flip requested: emit this row mirrored about the frame center.
	if(decoder->sharpen_flip) //SharpenLine
	{
		//if(!(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1)) // right channel only (stored in baseptr1)
		{
			yy = (line_max - 1 - y);
			outputline = output+yy*pitch;
		}
	}

	// Formats carrying a fourth (alpha) component use the 4444 converter.
	if( decoder->StereoBufferFormat == DECODED_FORMAT_RG64 ||
		decoder->StereoBufferFormat == DECODED_FORMAT_W13A ||
		decoder->StereoBufferFormat == DECODED_FORMAT_RGB32)
		alphachannel = 1;

	if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
	{
		width *= 2;
	}

	sbase = (uint16_t *)local_output;
	sbase += (local_pitch>>1) * y;

	// Map the intermediate buffer format to its bytes-per-pixel.
	switch(decoder->StereoBufferFormat)
	{
	case DECODED_FORMAT_RG64:
	case DECODED_FORMAT_W13A:
		skip = 8;
		break;
	case DECODED_FORMAT_WP13:
		skip = 6;
		break;
	case DECODED_FORMAT_RG48:
		skip = 6;
		break;
	case DECODED_FORMAT_RGB32:
		skip = 4;
		break;
	case DECODED_FORMAT_RGB24:
		skip = 3;
		break;
	case DECODED_FORMAT_YUYV:
		skip = 2;
		break;
	}

	// Private one-line scratch buffer for this worker thread.
	scratch = (short*)(buffer + width * skip * thread_index);

	{
		int flags = ACTIVEMETADATA_PRESATURATED;
		int whitebitdepth = 16;

		if((decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A))
		{
			int use_pitch = local_pitch;
			int edgeclose = 0;
			flags = 0;
			whitebitdepth = 13;

			if(blursharpen != 0.0 && local_pitch != 0)
			{
				// Five vertical taps centered on the current row; taps that
				// would fall off the image are clamped to the center row, and
				// edgeclose marks rows near the top/bottom border.
				short *Aptr,*Bptr,*Cptr,*Dptr,*Eptr;
				switch(decoder->channel_blend_type)
				{
				case BLEND_STACKED_ANAMORPHIC:
					// Stacked halves: rows are two buffer lines apart.
					sbase = (uint16_t *)local_output;
					sbase += (local_pitch>>1) * y * 2;
					if(y<=4) edgeclose = 1;
					if(y>=2) Aptr = (short *)sbase - (local_pitch>>1) * 4; else Aptr = (short *)sbase;
					if(y>=1) Bptr = (short *)sbase - (local_pitch>>1) * 2; else Bptr = (short *)sbase;
					Cptr = (short *)sbase;
					if(y<height-1) Dptr = (short *)sbase + (local_pitch>>1) * 2; else Dptr = (short *)sbase;
					if(y<height-2) Eptr = (short *)sbase + (local_pitch>>1) * 4; else Eptr = (short *)sbase;
					if(y>=height-4) edgeclose = 1;
					use_pitch = local_pitch * 2;
					break;
				case BLEND_LINE_INTERLEAVED:
					// Interleaved fields: odd rows come from the previous
					// buffer line, even rows from the second channel half
					// (channel_offset). Note y is decremented in place here.
					sbase = (uint16_t *)local_output;
					if(y & 1)
					{
						y--;
						sbase += (local_pitch>>1) * y;
					}
					else
					{
						sbase += (local_pitch>>1) * y;
						sbase += channel_offset>>1;
					}
					if(y<=8) edgeclose = 1;
					if(y>=4) Aptr = (short *)sbase - (local_pitch>>1) * 4; else Aptr = (short *)sbase;
					if(y>=2) Bptr = (short *)sbase - (local_pitch>>1) * 2; else Bptr = (short *)sbase;
					Cptr = (short *)sbase;
					if(y<height-2) Dptr = (short *)sbase + (local_pitch>>1) * 2; else Dptr = (short *)sbase;
					if(y<height-4) Eptr = (short *)sbase + (local_pitch>>1) * 4; else Eptr = (short *)sbase;
					if(y>=height-8) edgeclose = 1;
					use_pitch = local_pitch * 2;
					break;
				default:
					// Progressive layout: adjacent buffer lines.
					if(y<=4) edgeclose = 1;
					if(y>=2) Aptr = (short *)sbase - (local_pitch>>1) * 2; else Aptr = (short *)sbase;
					if(y>=1) Bptr = (short *)sbase - (local_pitch>>1) * 1; else Bptr = (short *)sbase;
					Cptr = (short *)sbase;
					if(y<height-1) Dptr = (short *)sbase + (local_pitch>>1) * 1; else Dptr = (short *)sbase;
					if(y<height-2) Eptr = (short *)sbase + (local_pitch>>1) * 2; else Eptr = (short *)sbase;
					if(y>=height-4) edgeclose = 1;
					use_pitch = local_pitch;
					break;
				}

				// skip==8 means W13A (with alpha); otherwise WP13.
				if(skip == 8)
				{
					FastSharpeningBlurVW13A(Aptr, Bptr, Cptr, Dptr, Eptr, use_pitch, edgeclose,
						scratch, width, blursharpen,
						decoder->frame.resolution,
						decoder->channel_blend_type);
				}
				else
				{
					FastSharpeningBlurVWP13(Aptr, Bptr, Cptr, Dptr, Eptr, use_pitch, edgeclose,
						scratch, width, blursharpen,
						decoder->frame.resolution,
						decoder->channel_blend_type);
				}
				// The filtered line replaces the source for output conversion.
				sbase = (uint16_t *)scratch;
			}
		}

		// Final format conversion into the destination frame.
		if(alphachannel)
			Convert4444LinesToOutput(decoder, width, 1, y, sbase,
				outputline, pitch, decoder->frame.format, whitebitdepth, flags);
		else
			ConvertLinesToOutput(decoder, width, 1, y, sbase,
				outputline, pitch, decoder->frame.format, whitebitdepth, flags);
	}
}
#if _GRAPHICS
// PaintFrame -- draw burn-in overlays and analysis tools (histogram,
// waveform, vectorscope, grid, safe markers) on top of a finished frame.
//
// Behavior is gated by decoder->cfhddata.BurninFlags (bit 1 = overlays,
// bit 2 = tools) and ComputeFlags (which analyses to compute/draw).
// When tools are requested, a ToolsHandle is lazily allocated, histogram
// data is gathered by the worker-thread pool, then the Draw* routines
// render into the output frame.
//
// Parameters:
//   decoder       - decoder state (metadata flags, thread pools, tools)
//   output, pitch - frame buffer to draw into and its row stride in bytes
//   output_format - pixel format of the output frame
void PaintFrame(DECODER *decoder, uint8_t *output, int pitch, int output_format)
{
	int x,y,v,width, height;
	int maxR=0,maxG=0,maxB=0;
	width = decoder->frame.width;
	height = decoder->frame.height;

	// Nothing to draw.
	if(decoder->cfhddata.BurninFlags == 0)
		return;

	// Tools requested (BurninFlags bit 1) and at least one analysis flag
	// beyond bit 0 set: lazily allocate the tools workspace.
	if(decoder->cfhddata.BurninFlags & 2 && decoder->cfhddata.ComputeFlags & ~1) // tools
	{
		if(decoder->tools == NULL)
		{
#if _ALLOCATOR
			decoder->tools = (ToolsHandle *)Alloc(decoder->allocator, sizeof(ToolsHandle));
#else
			decoder->tools = (ToolsHandle *)MEMORY_ALLOC(sizeof(ToolsHandle));
#endif
			if(decoder->tools)
			{
				memset(decoder->tools, 0, sizeof(ToolsHandle));
			}
			else
			{
				// Allocation failed -- skip all burn-ins this frame.
				return;
			}
		}
	}

	decoder->frame.output_format = output_format;

#if _THREADED && 1
	if(decoder->cfhddata.BurninFlags & 2 && decoder->cfhddata.ComputeFlags & ~1 && decoder->tools) // histogram/scopes/waveform
	{
		WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
		int workunits;

#if _DELAY_THREAD_START
		// Worker pool may not have been started yet when thread start-up is
		// deferred; create it on first use.
		if(decoder->tools->histogram == 0 && decoder->worker_thread.pool.thread_count == 0)
		{
			CreateLock(&decoder->worker_thread.lock);

			// Initialize the pool of transform worker threads
			ThreadPoolCreate(&decoder->worker_thread.pool,
							decoder->thread_cntrl.capabilities >> 16/*cpus*/,
							WorkerThreadProc,
							decoder);
		}
#endif
		{
			int avgR=0,avgG=0,avgB=0;

			// Post a message to the mailbox
			mailbox->output = output;
			// Subsample rows on large frames to bound the histogram cost.
			if(height >= 1080)
			{
				mailbox->pitch = pitch*4;	 // only read every 4th scan line
				workunits = height/4;		 // only read every 4th scan line
			}
			else if(height >= 540)
			{
				mailbox->pitch = pitch*2;	 // only read every 2th scan line
				workunits = height/2;		 // only read every 2th scan line
			}
			else
			{
				mailbox->pitch = pitch;		 // read every scan line
				workunits = height;			 // read every scan line
			}

			if(decoder->tools->histogram == 0)
			{
				mailbox->jobType = JOB_TYPE_HISTOGRAM; // histogram

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);

				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
			}

			// Average the per-bin counts; 3x the average is used as the
			// display scale ceiling instead of the true per-channel maximum.
			for(x=0;x<256;x++)
			{
				avgR += decoder->tools->histR[x];
				avgG += decoder->tools->histG[x];
				avgB += decoder->tools->histB[x];
				//if(maxR < decoder->histR[x]) maxR = decoder->histR[x];
				//if(maxG < decoder->histG[x]) maxG = decoder->histG[x];
				//if(maxB < decoder->histB[x]) maxB = decoder->histB[x];
			}
			avgR /= 256;
			avgG /= 256;
			avgB /= 256;
			//maxR++;
			//maxG++;
			//maxB++;
			decoder->tools->maxR = avgR*3;//maxR;
			decoder->tools->maxG = avgG*3;//maxG;
			decoder->tools->maxB = avgB*3;//maxB;
		}
	}
#endif

	if(decoder->cfhddata.BurninFlags && DrawOpen(decoder))
	{
		if(decoder->cfhddata.BurninFlags & 3) // overlays / tools
		{
#if _THREADED
			//DrawInit(decoder);
			//DrawStartThreaded(decoder);
			// If the draw thread pool is running the metadata objects were
			// rendered asynchronously; just wait for completion.
			if(decoder->draw_thread.pool.thread_count > 0)
			{
				DrawWaitThreaded(decoder);
			}
			else
#endif
			{
				DrawInit(decoder);
				DrawMetadataObjects(decoder);
			}
		}
		else
		{
			DrawInit(decoder);
		}

		if(decoder->drawSafeMarkers)
			DrawSafeMarkers(decoder);

		// Render whichever analysis tools were computed above
		// (ComputeFlags: 2 = histogram, 4 = waveform, 8 = vectorscope,
		// 16 = grid).
		if(decoder->cfhddata.BurninFlags & 2) // tools
		{
			if(decoder->tools)
			{
				if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 16)
					DrawGrid(decoder, 0/*decoder->MDPcurrent.parallax*/);
				if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 2)
					DrawHistogram(decoder, 0/*decoder->MDPcurrent.parallax*/);
				if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 4)
					DrawWaveform(decoder, 0/*decoder->MDPcurrent.parallax*/);
				if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 8)
					DrawVectorscope(decoder, 0/*decoder->MDPcurrent.parallax*/);
			}
		}

		// Composite the overlay plane onto the output frame.
		DrawScreen(decoder, output, pitch, output_format);
	}

#if 0
#if _THREADED && 1
	if(decoder->cfhddata.BurninFlags & 2 && decoder->cfhddata.ComputeFlags & 2 && decoder->tools) // histogram
	{
		WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
		int workunits;
		int targetW, targetH;

		if(width < 256 || height < 256)
			return;

		targetW = width / 4;
		targetH = height / 8;

		mailbox->output = output;
		mailbox->pitch = pitch;
		workunits = targetW;
		mailbox->jobType = JOB_TYPE_BURNINS; // burnin

		// Set the work count to the number of rows to process
		ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);

		// Start the transform worker threads
		ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

		// Wait for all of the worker threads to finish
		ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
	}
#else
	if(decoder->histogram == 0)
	{
		for(y=0; y<height; y+=4)
		{
			uint8_t *bptr = output;
			bptr += pitch * y;

			HistogramLine(decoder, (unsigned short *)bptr, width, output_format);
			if(decoder->histogram == 0)
				return; // don't know how to create Histogram for that format
		}
	}

	for(x=1;x<255;x++)
	{
		if(maxR < decoder->histR[x]) maxR = decoder->histR[x];
		if(maxG < decoder->histG[x]) maxG = decoder->histG[x];
		if(maxB < decoder->histB[x]) maxB = decoder->histB[x];
	}
	maxR++;
	maxG++;
	maxB++;
	decoder->maxR = maxR;
	decoder->maxG = maxG;
	decoder->maxB = maxB;

	for(x=0; x<targetW; x++)
	{
		HistogramRender(decoder, output, pitch, output_format, x, targetW, targetH);
	}
#endif
#endif

	// Tools buffers are single-use; zero them for the next frame.
	if(decoder->tools)
		memset(decoder->tools, 0, sizeof(ToolsHandle));
}
#endif
extern int geomesh_alloc_cache(void *gm);
#define DEG2RAD(d) (PI*(d)/180.0f)
#define RAD2DEG(r) (180.0f*(r)/PI)
// Test whether two pixel dimensions are "close enough" to be treated as
// equal. Both values are quantized into buckets whose size grows with the
// frame size (keyed off y: >1080 uses 64-wide buckets, >540 uses 32, else
// 16), and the values match when their buckets are identical or adjacent.
// NOTE: the bucket size depends only on y, so the test is not symmetric
// in its arguments.
bool approx_equal(int x, int y)
{
	int shift;
	if (y > 1080)
		shift = 6;
	else if (y > 540)
		shift = 5;
	else
		shift = 4;

	const int bx = x >> shift;
	const int by = y >> shift;
	return (bx == by) || (bx + 1 == by) || (bx == by + 1);
}
// Test whether two floats agree to within 1% (relative to x).
//
// Fix: the previous formulation `x*0.99 < y && y < x*1.01` returned false
// for x == y == 0 (the interval collapses to an empty set) and for equal
// negative values (the interval inverts). For all positive x the result is
// unchanged: |y - x| < 0.01*x is the same strict condition.
bool approx_equal_float(float x, float y)
{
	if (x == y)                      // also covers x == y == 0.0f
		return true;
	return fabsf(x - y) < 0.01f * fabsf(x);
}
#if WARPSTUFF
void WarpFrame(DECODER *decoder, uint8_t *output, int pitch, int output_format)
{
int width, height;
//int maxR = 0, maxG = 0, maxB = 0;
int status = WARPLIB_SUCCESS;
CFHDDATA *cfhddata = &decoder->cfhddata;
int backgroundfill = cfhddata->lensFill;
float sensorcrop = 1.0;
float phi, theta, rho;
int srcLens = HERO4;
if (!cfhddata->doMesh) return;
if (decoder->lastLensOffsetX != cfhddata->LensOffsetX ||
decoder->lastLensOffsetY != cfhddata->LensOffsetY ||
decoder->lastLensOffsetZ != cfhddata->LensOffsetZ ||
decoder->lastLensOffsetR != cfhddata->LensOffsetR ||
decoder->lastLensZoom != cfhddata->LensZoom ||
decoder->lastLensFishFOV != cfhddata->LensFishFOV ||
decoder->lastLensGoPro != cfhddata->lensGoPro ||
decoder->lastLensSphere != cfhddata->lensSphere ||
decoder->lastLensFill != cfhddata->lensFill ||
decoder->lastLensStyleSel != cfhddata->lensStyleSel ||
memcmp(decoder->lastLensCustomSRC, cfhddata->lensCustomSRC, sizeof(cfhddata->lensCustomSRC)) ||
memcmp(decoder->lastLensCustomDST, cfhddata->lensCustomDST, sizeof(cfhddata->lensCustomDST)) )
{
if (decoder->mesh)
geomesh_destroy(decoder->mesh);
width = decoder->frame.width;
height = decoder->frame.height;
if (approx_equal(width, height * 2)) // approx. 2:1
{
float outputaspect = 16.0f/9.0f;
srcLens = EQUIRECT;
sensorcrop = 1.00623f; // Fixes the slight calculation error difference between 16x9 with a 4x3, and 16x9 within a 2x1 image.
if (cfhddata->lensCustomSRC[1])
{
outputaspect = cfhddata->lensCustomSRC[0] / cfhddata->lensCustomSRC[1];
if (outputaspect >= 1.0f && outputaspect <= 3.0f)
{
//float sourceratio = (float)width / (float)height;
if (approx_equal_float(outputaspect, 4.0f / 3.0f))
sensorcrop = sqrtf((float)(width*width + height*height)) / sqrtf((float)((width * 2 / 3)*(width * 2 / 3) + (height*height)));
if (approx_equal_float(outputaspect, 16.0f / 9.0f)) // 0.88;
sensorcrop = 1.00623f; // Fixes the slight calculation error difference between 16x9 with a 4x3, and 16x9 within a 2x1 image.
}
}
if (width >= 2496)
decoder->mesh = geomesh_create(199, 99);
else if (width >= 1272)
decoder->mesh = geomesh_create(99, 49);
else
decoder->mesh = geomesh_create(49, 25);
phi = cfhddata->LensOffsetX * DEG2RAD(720.0f); // +-180deg HFOV for 2:1
theta = cfhddata->LensOffsetY * DEG2RAD(720.0f); // +-180deg VFOV for 2:1
rho = (cfhddata->LensOffsetZ - 1.0f)*4.0f* DEG2RAD(360.0f); // +-360deg
}
else if (approx_equal(width * 3, height * 4)) // approx. 4:3
{
srcLens = HERO4;
sensorcrop = 1.0;
if (width > 2880) // UHD
decoder->mesh = geomesh_create(159, 119);
else if (width >= 1920) //HD/2.7K
decoder->mesh = geomesh_create(79, 59);
else
decoder->mesh = geomesh_create(39, 29);
phi = cfhddata->LensOffsetX * DEG2RAD(120.0f); // +-60deg HFOV for 16:9
theta = cfhddata->LensOffsetY * DEG2RAD(98.0f); // +-49deg VFOV for 16:9
rho = (cfhddata->LensOffsetZ - 1.0f)*4.0f* DEG2RAD(360.0f); // +-360deg
}
else //if(approx_equal(width*9,height*16)) // approx. 16:9
{
srcLens = HERO4;
sensorcrop = sqrtf(1920 * 1920 + 1080 * 1080) / sqrtf(2000 * 2000 + 1500 * 1500); // 3840x2160 from 4000x3000
if (width > 2880) // UHD
decoder->mesh = geomesh_create(159, 119);
else if (width >= 1920) //HD/2.7K
decoder->mesh = geomesh_create(79, 59);
else
decoder->mesh = geomesh_create(39, 29);
phi = cfhddata->LensOffsetX * DEG2RAD(120.0f); // +-60.1deg HFOV for 16:9
theta = cfhddata->LensOffsetY * DEG2RAD(70.0f); // +-34.75deg VFOV for 16:9
rho = (cfhddata->LensOffsetZ - 1.0f)*4.0f* DEG2RAD(360.0f); // +-360deg
}
if ((output_format & 0x7fffffff) == COLOR_FORMAT_YUYV)
status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_YUY2, width, height, pitch, WARPLIB_FORMAT_YUY2, backgroundfill);
else if ((output_format & 0x7fffffff) == COLOR_FORMAT_RGB32)
status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_32BGRA, width, height, pitch, WARPLIB_FORMAT_32BGRA, backgroundfill);
else if ((output_format & 0x7fffffff) == COLOR_FORMAT_W13A)
status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_W13A, width, height, pitch, WARPLIB_FORMAT_W13A, backgroundfill);
else if ((output_format & 0x7fffffff) == COLOR_FORMAT_WP13)
status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_WP13, width, height, pitch, WARPLIB_FORMAT_WP13, backgroundfill);
else if ((output_format & 0x7fffffff) == COLOR_FORMAT_RG48)
status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_RG48, width, height, pitch, WARPLIB_FORMAT_RG48, backgroundfill);
else if ((output_format & 0x7fffffff) == COLOR_FORMAT_BGRA64)
status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_64ARGB, width, height, pitch, WARPLIB_FORMAT_64ARGB, backgroundfill);
else
assert(0);
if (cfhddata->lensSphere == 1)
{
if (cfhddata->lensGoPro != 2) // not outputting EQUIRECT
{
if (cfhddata->LensOffsetR != 0.0)
{
//float angle = 360.0 * asinf(cfhddata->LensOffsetR * 1.7777777777) / (2.0 * 3.14159);
float angle = 360.0f * cfhddata->LensOffsetR * cfhddata->LensOffsetR * 2.1f;//asinf(cfhddata->LensOffsetR * 1.7777777777) / (2.0 * 3.14159);
if (cfhddata->LensOffsetR < 0.0) angle = -angle;
geomesh_transform_rotate(decoder->mesh, angle);
}
if (cfhddata->LensZoom != 1.0)
geomesh_transform_scale(decoder->mesh, cfhddata->LensZoom, cfhddata->LensZoom);
if (cfhddata->LensFishFOV != 0.0) // DeFish
{
float fov = cfhddata->LensFishFOV;// *180.0;
if (fov > 89.9f) fov = 89.9f;
if (fov < -89.9f) fov = -89.9f;
if (fov)
status |= geomesh_transform_defish(decoder->mesh, fov);
}
}
switch (cfhddata->lensGoPro)
{
case 0: geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, srcLens, RECTILINEAR); break;
case 1: geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, srcLens, HERO4); break;
case 2: geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, srcLens, EQUIRECT); break;
case 4:
geomesh_set_custom_lens(decoder->mesh, cfhddata->lensCustomSRC, cfhddata->lensCustomDST, sizeof(cfhddata->lensCustomDST));
if (srcLens == EQUIRECT) geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, EQUIRECT, CUSTOM_LENS);
else geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, CUSTOM_LENS, CUSTOM_LENS);
break;
}
}
else // old boring geometry
{
if (cfhddata->LensZoom != 1.0)
geomesh_transform_scale(decoder->mesh, cfhddata->LensZoom, cfhddata->LensZoom);
// basic orthographic moves
if (cfhddata->LensOffsetX != 0.0 || cfhddata->LensOffsetY != 0.0)
geomesh_transform_pan(decoder->mesh, cfhddata->LensOffsetX*(float)width, -cfhddata->LensOffsetY*(float)height);
if (cfhddata->LensOffsetR != 0.0)
{
float angle = 360.0f * asinf(cfhddata->LensOffsetR * 1.7777777777f) / (2.0f * 3.14159f);
geomesh_transform_rotate(decoder->mesh, angle);
}
if (cfhddata->lensGoPro == 0) //Rectilear
status |= geomesh_transform_gopro_to_rectilinear(decoder->mesh, sensorcrop);
//status |= geomesh_fisheye_gopro_adjustmesh(mesh, &correction_mode, WARPLIB_ALGORITHM_PRESERVE_EVERYTHING,//WARPLIB_ALGORITHM_BEST_FIT,
// width, height, product, model, lens_type, fov, (int)decoder->frame.resolution);
}
geomesh_alloc_cache(decoder->mesh); // required for JOB_TYPE_WARP_CACHE
if (status == WARPLIB_SUCCESS)
{
if (decoder->lens_correct_buffer == NULL)
{
#if _ALLOCATOR
decoder->lens_correct_buffer = (int *)Alloc(decoder->allocator, pitch * height);
#else
decoder->lens_correct_buffer = (int *)MEMORY_ALLOC(pitch * height);
#endif
}
}
else
{
return;
}
/* need resources?
{
if(decoder->tools == NULL)
{
#if _ALLOCATOR
decoder->tools = (ToolsHandle *)Alloc(decoder->allocator, sizeof(ToolsHandle));
#else
decoder->tools = (ToolsHandle *)MEMORY_ALLOC(sizeof(ToolsHandle));
#endif
if(decoder->tools)
{
memset(decoder->tools, 0, sizeof(ToolsHandle));
}
else
{
return;
}
}
}
*/
#if _THREADED && 1
{
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
int workunits = decoder->frame.height;
#if _DELAY_THREAD_START
if (decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16,
WorkerThreadProc,
decoder);
}
#endif
{
// Post a message to the mailbox
mailbox->data = decoder->mesh;
mailbox->output = output;
mailbox->local_output = (uint8_t *)decoder->lens_correct_buffer;
mailbox->line_max = decoder->frame.height;
mailbox->chunk_size = 16;
workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
mailbox->jobType = JOB_TYPE_WARP_CACHE;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
}
#endif
//decoder->frame.output_format = output_format;
decoder->lastLensOffsetX = cfhddata->LensOffsetX;
decoder->lastLensOffsetY = cfhddata->LensOffsetY;
decoder->lastLensOffsetZ = cfhddata->LensOffsetZ;
decoder->lastLensOffsetR = cfhddata->LensOffsetR;
decoder->lastLensZoom = cfhddata->LensZoom;
decoder->lastLensFishFOV = cfhddata->LensFishFOV;
decoder->lastLensGoPro = cfhddata->lensGoPro;
decoder->lastLensSphere = cfhddata->lensSphere;
decoder->lastLensFill = cfhddata->lensFill;
decoder->lastLensStyleSel = cfhddata->lensStyleSel;
memcpy(decoder->lastLensCustomSRC, cfhddata->lensCustomSRC, sizeof(cfhddata->lensCustomSRC));
memcpy(decoder->lastLensCustomDST, cfhddata->lensCustomDST, sizeof(cfhddata->lensCustomDST));
}
#if _THREADED && 1
{
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
int workunits = decoder->frame.height;
mailbox->data = decoder->mesh;
mailbox->output = output;
mailbox->local_output = (uint8_t *)decoder->lens_correct_buffer;
mailbox->line_max = decoder->frame.height;
mailbox->chunk_size = 16;
workunits = (mailbox->line_max + mailbox->chunk_size-1)/mailbox->chunk_size;
mailbox->jobType = JOB_TYPE_WARP;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
if(backgroundfill) // may need to blur the filled in areas
{
mailbox->data = decoder->mesh;
mailbox->output = (uint8_t *)decoder->lens_correct_buffer;
mailbox->local_output = (uint8_t *)decoder->lens_correct_buffer;
mailbox->line_max = decoder->frame.width;
mailbox->chunk_size = 16;
mailbox->pitch = pitch;
workunits = (mailbox->line_max + mailbox->chunk_size-1)/mailbox->chunk_size;
mailbox->jobType = JOB_TYPE_WARP_BLURV;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
}
#else // not threading
{
//geomesh_cache_init_bilinear(decoder->mesh); //bad
geomesh_cache_init_bilinear_range(decoder->mesh, 0, decoder->frame.height); //good
geomesh_apply_bilinear(decoder->mesh, (unsigned char *)output, (unsigned char *)decoder->lens_correct_buffer, 0, decoder->frame.height);
}
#endif
memcpy(output, decoder->lens_correct_buffer, pitch * decoder->frame.height);
/*
if(lens_correct_buffer)
#if _ALLOCATOR
Free(decoder->allocator, lens_correct_buffer);
#else
MEMORY_ALIGNED_FREE(lens_correct_buffer);
#endif
geomesh_destroy(mesh);
*/
}
/**
 * MaskFrame
 *
 * Blank (black out) the areas of the decoded frame that fall outside the
 * active lens region.  The region is taken from
 * cfhddata->LensXmin/LensXmax/LensYmin/LensYmax, expressed as fractions
 * (0.0..1.0) of the frame; rows above/below it and the left/right margins
 * of the rows inside it are overwritten with the format's black value.
 *
 * @param decoder        Decoder state; supplies the frame height and CFHDDATA.
 * @param output         Base address of the frame to mask in place.
 * @param pitch          Row pitch of the output buffer in bytes.
 * @param output_format  COLOR_FORMAT_* of the output buffer (top bit ignored).
 *
 * Fixes vs. the previous version:
 *  - COLOR_FORMAT_YU64 set bitsize = 16 and computed 16-bit fill values, but
 *    no 16-bit fill loop existed, so the mask was silently skipped for YU64
 *    output.  A 16-bit path mirroring the 8-bit one is added below.
 *  - RGB formats fell through into the 8-bit fill loop after the memset
 *    pass, redundantly re-writing every masked byte with zero; the RGB
 *    branch now returns when done.
 */
void MaskFrame(DECODER *decoder, uint8_t *output, int pitch, int output_format)
{
    int x, y, height;
    int minY, maxY;
    int minX, maxX;
    CFHDDATA *cfhddata = &decoder->cfhddata;
    uint8_t *line = output;
    uint32_t fillA = 0;     // first component of the 2-component fill pattern
    uint32_t fillB = 0;     // second component of the fill pattern
    int bitsize = 8;        // component size in bits (8 or 16)

    if (!cfhddata->doMesh) return;

    height = decoder->frame.height;

    // A region that is all zero (unset) or exactly the full frame needs no masking.
    if (decoder->cfhddata.LensYmin == 0.0 && decoder->cfhddata.LensXmin == 0.0 && decoder->cfhddata.LensYmax == 0.0 && decoder->cfhddata.LensXmax == 0.0) return;
    if (decoder->cfhddata.LensYmin == 0.0 && decoder->cfhddata.LensXmin == 0.0 && decoder->cfhddata.LensYmax == 1.0 && decoder->cfhddata.LensXmax == 1.0) return;

    minY = (int)(decoder->cfhddata.LensYmin*(float)height);
    maxY = (int)(decoder->cfhddata.LensYmax*(float)height);
    // Horizontal limits are byte offsets within a row, rounded down to a
    // 4-byte boundary so fills stay aligned to whole pixel pairs.
    minX = 0xfffc & (int)(decoder->cfhddata.LensXmin*(float)pitch);
    maxX = 0xfffc & (int)(decoder->cfhddata.LensXmax*(float)pitch);

    if (FORMATRGB(output_format))
    {
        // RGB formats: black is all-zero bytes, so plain memset() suffices.
        line = output;
        // Top rows
        for (y = 0; y < minY; y++)
        {
            memset(line, 0, abs(pitch));
            line += pitch;
        }
        // Left and Right edges of middle rows
        if (maxX - minX != pitch)
        {
            for (; y < maxY; y++)
            {
                memset(line, 0, minX);
                memset(line + maxX, 0, pitch - maxX);
                line += pitch;
            }
        }
        // Bottom rows
        y = maxY;
        line = output + y*pitch;
        for (; y < height; y++)
        {
            memset(line, 0, abs(pitch));
            line += pitch;
        }
        return; // done -- previously fell through and redundantly re-filled with zeros
    }

    // YUV formats: black is a two-component pattern whose component order
    // depends on the packing (luma-first vs. chroma-first).
    switch (output_format & 0x7fffffff)
    {
    case COLOR_FORMAT_YVYU:
    case COLOR_FORMAT_YUYV:
        fillA = 0x10;       // luma black
        fillB = 0x80;       // chroma neutral
        break;
    case COLOR_FORMAT_UYVY:
    case COLOR_FORMAT_2VUY:
        fillA = 0x80;
        fillB = 0x10;
        break;
    case COLOR_FORMAT_YU64:
        fillA = 0x8000;
        fillB = 0x1000;
        bitsize = 16;       // 16-bit components
        break;
    }

    if (bitsize == 8)
    {
        line = output;
        // Top rows
        for (y = 0; y < minY; y++)
        {
            for (x = 0; x < pitch; x += 2)
            {
                line[x] = fillA;
                line[x + 1] = fillB;
            }
            line += pitch;
        }
        // Left and Right edges of middle rows
        if (maxX - minX != pitch)
        {
            for (; y < maxY; y++)
            {
                for (x = 0; x < minX; x += 2)
                {
                    line[x] = fillA;
                    line[x + 1] = fillB;
                }
                for (x = maxX; x < pitch; x += 2)
                {
                    line[x] = fillA;
                    line[x + 1] = fillB;
                }
                line += pitch;
            }
        }
        // Bottom rows
        y = maxY;
        line = output + y*pitch;
        for (; y < height; y++)
        {
            for (x = 0; x < pitch; x += 2)
            {
                line[x] = fillA;
                line[x + 1] = fillB;
            }
            line += pitch;
        }
    }
    else if (bitsize == 16)
    {
        // 16-bit components (YU64).  Mirrors the 8-bit loops but walks the
        // rows as uint16_t; the byte offsets minX/maxX/pitch are halved to
        // component offsets (they are 4-byte aligned, so pairs stay intact).
        // NOTE(review): component order follows the same fillA/fillB pattern
        // as the 8-bit path -- confirm against the YU64 packing if possible.
        uint16_t *line16 = (uint16_t *)output;
        int pitch16 = pitch / 2;
        int minX16 = minX / 2;
        int maxX16 = maxX / 2;

        // Top rows
        for (y = 0; y < minY; y++)
        {
            for (x = 0; x < pitch16; x += 2)
            {
                line16[x] = (uint16_t)fillA;
                line16[x + 1] = (uint16_t)fillB;
            }
            line16 += pitch16;
        }
        // Left and Right edges of middle rows
        if (maxX - minX != pitch)
        {
            for (; y < maxY; y++)
            {
                for (x = 0; x < minX16; x += 2)
                {
                    line16[x] = (uint16_t)fillA;
                    line16[x + 1] = (uint16_t)fillB;
                }
                for (x = maxX16; x < pitch16; x += 2)
                {
                    line16[x] = (uint16_t)fillA;
                    line16[x + 1] = (uint16_t)fillB;
                }
                line16 += pitch16;
            }
        }
        // Bottom rows
        y = maxY;
        line16 = (uint16_t *)output + y*pitch16;
        for (; y < height; y++)
        {
            for (x = 0; x < pitch16; x += 2)
            {
                line16[x] = (uint16_t)fillA;
                line16[x + 1] = (uint16_t)fillB;
            }
            line16 += pitch16;
        }
    }
}
#endif //#if WARPSTUFF
// Apply the 3D/stereo geometry passes (per-column vertical offset, rotation/
// keystone, framing) to the locally decoded frame and convert/copy it into
// the caller's output buffer in the requested output format.
//
//   decoder        - decoder state (stereo buffer, thread pool, CFHDDATA)
//   output         - destination frame buffer
//   pitch          - destination row pitch in bytes
//   output_format  - requested output pixel format
//   local_output   - source buffer holding the decoded channel(s)
//   local_pitch    - row pitch of the local buffer in bytes
//   channel_offset - byte offset from channel 0 to channel 1 in the local
//                    buffer (negative indicates the channels are swapped)
void ConvertLocalToOutput(DECODER *decoder, uint8_t *output, int pitch, int output_format, uint8_t *local_output, int local_pitch, int channel_offset)
{
    uint8_t *local_output_double = local_output;
    //Frame_Region emptyFrameMask = {0};

    // When a separate stereo buffer exists the decoded data lives there,
    // not in the caller-supplied local buffer.
    if(decoder->StereoBuffer)
        local_output_double = local_output = (uint8_t *)decoder->StereoBuffer;

    if(channel_offset < 0) // channel swapped
    {
        channel_offset = -channel_offset;
    }

    // If the local format and the output format disagree on vertical
    // orientation, walk the local buffer bottom-up (negate the pitch).
    if(INVERTEDFORMAT(decoder->frame.format) != INVERTEDFORMAT(output_format))
    {
        local_output += local_pitch*(decoder->frame.height-1);
        if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC)
            local_output_double += local_pitch*(decoder->frame.height*decoder->channel_decodes-1);
        else
            local_output_double = local_output;
        local_pitch = -local_pitch;
    }

    // Flipped/color-swapped outputs also invert the sign of the offsets
    // applied later by the orientation passes.
    if(FLIPCOLORS(output_format) || output_format & 0x80000000)
    {
        decoder->cfhddata.InvertOffset = 1;
    }
    else
    {
        decoder->cfhddata.InvertOffset = 0;
    }
    decoder->frame.format = output_format;
    //decoder->frame.colorspace = COLOR_SPACE_CG_601;

#if _THREADED
    {
        WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
        int workunits;

#if _DELAY_THREAD_START
        if(decoder->worker_thread.pool.thread_count == 0)
        {
            CreateLock(&decoder->worker_thread.lock);
            // Initialize the pool of transform worker threads
            ThreadPoolCreate(&decoder->worker_thread.pool,
                decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                WorkerThreadProc,
                decoder);
        }
#endif
        // Run the vertical (per-column) 3D pass only when some orientation or
        // framing parameter actually deviates from the identity transform.
        if( ((decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION) &&
            (decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
            decoder->cfhddata.channel[1].FrameKeyStone ||
            decoder->cfhddata.channel[1].VerticalOffset ||
            decoder->cfhddata.channel[1].RotationOffset ||
            decoder->cfhddata.channel[1].FrameTilt ||
            decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0 ||
            decoder->cfhddata.channel[2].FrameKeyStone ||
            decoder->cfhddata.channel[2].VerticalOffset ||
            decoder->cfhddata.channel[2].RotationOffset ||
            decoder->cfhddata.channel[2].FrameTilt))
            ||
            ((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
            (decoder->cfhddata.FrameOffsetY ||
            decoder->cfhddata.FrameOffsetR ||
            // decoder->cfhddata.FrameOffsetX || ||
            decoder->cfhddata.FrameHScale != 1.0 ||
            decoder->cfhddata.FrameHDynamic != 1.0 ||
            decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
            decoder->cfhddata.channel[2].FrameZoom != 1.0) ))
        {
            //int x;
            int xbytes, xstep;
            //uint8_t *base = local_output;
            int width, height, chunk_size;
            int fine_vertical = 0;

            width = decoder->frame.width;
            height = decoder->frame.height;

            // xbytes = bytes per row of the stereo buffer format;
            // xstep = bytes processed per work column.
            switch(decoder->StereoBufferFormat)
            {
            case DECODED_FORMAT_RGB32:
                xbytes = width*4;
                xstep = 16;
                break;
            case DECODED_FORMAT_RGB24:
                xbytes = width*3;
                xstep = 16;
                break;
            case DECODED_FORMAT_YUYV:
                xbytes = width*2;
                xstep = 16;
                break;
            case DECODED_FORMAT_W13A:
            case DECODED_FORMAT_RG64:
                xbytes = width*8;
                xstep = 32;
                break;
            case DECODED_FORMAT_WP13:
            case DECODED_FORMAT_RG48:
                xbytes = width*6;
                xstep = 32;
                break;
            default:
                assert(0);
                break;
            }

            // Choose a work-chunk size: large chunks when no rotation/keystone
            // is in play, per-column (and per-pixel xstep) when the rotation
            // is big enough to need fine-grained vertical interpolation.
            if(!(decoder->cfhddata.process_path_flags & (PROCESSING_ORIENTATION|PROCESSING_FRAMING)) ||
                (decoder->cfhddata.channel[1].RotationOffset == 0.0 && decoder->cfhddata.channel[1].FrameKeyStone == 0.0 &&
                decoder->cfhddata.channel[2].RotationOffset == 0.0 && decoder->cfhddata.channel[2].FrameKeyStone == 0.0 &&
                decoder->cfhddata.FrameOffsetR == 0.0))
            {
                chunk_size = 8;
            }
            else
            {
                chunk_size = 1;
                if((fabs(decoder->cfhddata.channel[1].RotationOffset) +
                    fabs(decoder->cfhddata.channel[1].FrameKeyStone*0.2) +
                    fabs(decoder->cfhddata.FrameOffsetR)) > 0.015 ||
                    (fabs(decoder->cfhddata.channel[2].RotationOffset) +
                    fabs(decoder->cfhddata.channel[2].FrameKeyStone*0.2) +
                    fabs(decoder->cfhddata.FrameOffsetR)) > 0.015)
                {
                    // Drop xstep to a single pixel's byte width for fine work.
                    switch(decoder->StereoBufferFormat)
                    {
                    case DECODED_FORMAT_RGB32:
                        xstep = 4;
                        break;
                    case DECODED_FORMAT_RGB24:
                        xstep = 3;
                        break;
                    case DECODED_FORMAT_YUYV:
                        xstep = 4;
                        break;
                    case DECODED_FORMAT_W13A:
                    case DECODED_FORMAT_RG64:
                        xstep = 8;
                        break;
                    case DECODED_FORMAT_WP13:
                    case DECODED_FORMAT_RG48:
                    default:
                        xstep = 6;
                        break;
                    }
                    fine_vertical = 1;
                }
            }

            // Interlaced 4:2:2 sources at full (or half-horizontal) resolution
            // are processed one field at a time: two vertical passes, each on
            // every other line (double pitch, half height).
            if( decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422 &&
                (decoder->frame.resolution == DECODED_RESOLUTION_FULL ||
                decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) &&
                decoder->codec.progressive == false)
            {
                int interlaced_pitch = local_pitch * 2;
                uint8_t *field2_output = local_output + local_pitch;

                // Post a message to the mailbox (field 1)
                mailbox->local_output = local_output;
                mailbox->local_pitch = interlaced_pitch;
                mailbox->channel_offset = channel_offset;
                memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
                mailbox->info.height >>= 1;
                mailbox->line_max = (xbytes + xstep-1)/xstep;
                mailbox->chunk_size = chunk_size;
                mailbox->fine_vertical = fine_vertical;
                mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical

                workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;

                // Set the work count to the number of rows to process
                ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
                // Start the transform worker threads
                ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
                // Wait for all of the worker threads to finish
                ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

                // Post a message to the mailbox (field 2)
                mailbox->local_output = field2_output;
                mailbox->local_pitch = interlaced_pitch;
                mailbox->channel_offset = channel_offset;
                memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
                mailbox->info.height >>= 1;
                mailbox->chunk_size = chunk_size;
                mailbox->line_max = (xbytes + xstep-1)/xstep;
                mailbox->fine_vertical = fine_vertical;
                mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical

                workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;

                // Set the work count to the number of rows to process
                ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
                // Start the transform worker threads
                ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
                // Wait for all of the worker threads to finish
                ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
            }
            else
            {
                //TODO Lens correct here.
                //call JOB_TYPE_VERTICAL_3D then (or lens correction equivalent.)
                // JOB_TYPE_HORIZONTAL_3D
                //before doing any offset and rotation corrections.

                if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) //HACK //DAN20110129
                    width /= 2;

                // Post a message to the mailbox
                mailbox->local_output = local_output;
                mailbox->local_pitch = local_pitch;
                mailbox->channel_offset = channel_offset;
                memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
                mailbox->chunk_size = chunk_size;
                mailbox->line_max = (xbytes + xstep-1)/xstep;
                mailbox->fine_vertical = fine_vertical;
                mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical

                workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;

                // Set the work count to the number of rows to process
                ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
                // Start the transform worker threads
                ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
                // Wait for all of the worker threads to finish
                ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
            }
        }

        // Horizontal 3D pass: blends/flips the channels and writes the final
        // output buffer.  Post a message to the mailbox.
        mailbox->output = output;
        mailbox->pitch = pitch;
        mailbox->local_output = local_output;
        mailbox->local_pitch = local_pitch;
        mailbox->channel_offset = channel_offset;
        memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
        mailbox->chunk_size = 16;
        mailbox->line_max = decoder->frame.height;
        if(decoder->channel_mix_half_res == 1)
            mailbox->line_max *= 2;

        workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;

        // The horizontal pass may request a follow-up vertical sharpen pass.
        decoder->doVerticalFilter = 0;
        mailbox->jobType = JOB_TYPE_HORIZONAL_3D; // 3d work && horizontal and vertical flips

        // Set the work count to the number of rows to process
        ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
        // Start the transform worker threads
        ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
        // Wait for all of the worker threads to finish
        ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

        if(decoder->doVerticalFilter)
        {
            // Optional sharpen pass.  Post a message to the mailbox.
            mailbox->output = output;
            mailbox->pitch = pitch;
            mailbox->local_output = local_output_double;
            mailbox->local_pitch = local_pitch;
            mailbox->channel_offset = channel_offset;
            memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
            mailbox->chunk_size = 16;
            mailbox->line_max = decoder->frame.height;
            if(decoder->channel_decodes == 2 && decoder->channel_blend_type == 0)
                mailbox->line_max *= 2;
            if(decoder->channel_mix_half_res == 1)
                mailbox->line_max *= 2;

            workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;

            mailbox->jobType = JOB_TYPE_SHARPEN; // 3d work && horizontal and vertical flips

            // Set the work count to the number of rows to process
            ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
            // Start the transform worker threads
            ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
            // Wait for all of the worker threads to finish
            ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
        }
    }
#else
    // NOTE(review): this single-threaded fallback appears stale -- it
    // references emptyFrameMask, which is commented out at the top of this
    // function; verify it still compiles before building with _THREADED == 0.
    {
        int y,width, height;
        uint8_t scratch[4096*16];
        int scratchremain = 4096*16;
        int ymin = 0, ymax;

        width = decoder->frame.width;
        height = decoder->frame.height;
        ymax = height;

        // Restrict processing to the frame-mask band when one is set.
        if((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
            memcmp(&decoder->cfhddata.channel[0].FrameMask, &emptyFrameMask, 32))
        {
            ymin = (float)height * decoder->cfhddata.channel[0].FrameMask.topLftY;
            ymax = (float)height * decoder->cfhddata.channel[0].FrameMask.botLftY;
        }

        if( ((decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION) &&
            (decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
            decoder->cfhddata.channel[1].FrameKeyStone ||
            decoder->cfhddata.channel[1].VerticalOffset ||
            decoder->cfhddata.channel[1].RotationOffset ||
            decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0 ||
            decoder->cfhddata.channel[2].FrameKeyStone ||
            decoder->cfhddata.channel[2].VerticalOffset ||
            decoder->cfhddata.channel[2].RotationOffset))
            ||
            ((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
            (decoder->cfhddata.FrameOffsetY ||
            decoder->cfhddata.FrameOffsetR ||
            decoder->cfhddata.FrameOffsetX ||
            decoder->cfhddata.FrameHScale != 1.0 ||
            decoder->cfhddata.FrameHDynamic != 1.0 ||
            decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
            decoder->cfhddata.channel[2].FrameZoom != 1.0))
        {
            int x,xbytes, xstep;
            uint8_t *base = local_output;
            float voffsetstep;
            float voffset = decoder->cfhddata.channel[1].VerticalOffset;
            float roffset = decoder->cfhddata.channel[1].RotationOffset;
            float voffset1, voffset2;
            float voffsetstep1, voffsetstep2;
            int channel_flip = decoder->cfhddata.channel_flip;
            int aspectx,aspecty;
            float aspectfix;

            GetDisplayAspectRatio(decoder, &aspectx, &aspecty);
            aspectfix = (float)(aspectx*aspectx) / (float)(aspecty*aspecty);

            if(!(decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION))
            {
                voffset = roffset = 0;
            }
            if(!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS))
            {
                channel_flip = 0;
            }
            if(decoder->cfhddata.process_path_flags & PROCESSING_FRAMING)
                voffset += decoder->cfhddata.FrameOffsetY;

            if(decoder->cfhddata.InvertOffset)
            {
                voffset = -voffset;
                roffset = -roffset;
            }

            switch(decoder->StereoBufferFormat)
            {
            case DECODED_FORMAT_RGB32:
                xbytes = width*4;
                xstep = 16;
                break;
            case DECODED_FORMAT_RGB24:
                xbytes = width*3;
                xstep = 16;
                break;
            case DECODED_FORMAT_YUYV:
                xbytes = width*2;
                xstep = 16;
                break;
            case DECODED_FORMAT_WP13:
            case DECODED_FORMAT_RG48:
            default:
                xbytes = width*6;
                xstep = 32;
                break;
            }

            //DAN20100923 -- simplified
            //voffset += roffset * (float)(width*width) / (float)(height*height) * 0.5;
            //voffsetstep = -roffset * (float)(width*width) / (float)(height*height) / (float)(xbytes/xstep);
            // Rotation is approximated as a linear vertical ramp across the
            // columns, scaled by the display aspect ratio.
            voffset += roffset * aspectfix * 0.5;
            voffsetstep = -roffset * aspectfix / (float)(xbytes/xstep);

            if(roffset == 0.0)
                xstep = xbytes; // no rotation: one column covers the whole row

            voffset1 = voffset2 = voffset;
            voffsetstep1 = voffsetstep2 = voffsetstep;

            // Mirror the offsets/ramps for flipped channels.
            if(channel_flip & 0xf)
            {
                if(channel_flip & 2)
                {
                    voffset1 = -voffset1;
                    voffsetstep1 = -voffsetstep1;
                }
                if(channel_flip & 8)
                {
                    voffset2 = -voffset2;
                    voffsetstep2 = -voffsetstep2;
                }
                if(channel_flip & 1)
                {
                    voffset1 += voffsetstep1*(xbytes/xstep);
                    voffsetstep1 = -voffsetstep1;
                }
                if(channel_flip & 4)
                {
                    voffset2 += voffsetstep2*(xbytes/xstep);
                    voffsetstep2 = -voffsetstep2;
                }
            }

            // Shift each column strip vertically, per channel.
            for(x=0; x<xbytes; x+=xstep)
            {
                if(decoder->channel_decodes == 1 && decoder->channel_current == 1) // Right only
                {
                    RGB48VerticalShift(decoder, base, (unsigned short *)scratch,
                        xstep, height, local_pitch, -voffset2);
                }
                else
                {
                    RGB48VerticalShift(decoder, base, (unsigned short *)scratch,
                        xstep, height, local_pitch, voffset1);
                }

                if(decoder->channel_decodes == 2)
                {
                    uint8_t *bptr = base + channel_offset;
                    RGB48VerticalShift(decoder, bptr, (unsigned short *)scratch,
                        xstep, height, local_pitch, -voffset2);
                }
                base += xstep;
                voffset1 += voffsetstep1;
                voffset2 += voffsetstep2;
            }
        }

        if(decoder->channel_mix_half_res == 1)
            height *= 2;

        if(ymin)
        {
            memset(local_output, 0, abs(local_pitch)); // zero one line;
        }
        // Rows above/below the mask band are fed a zero pitch so they repeat
        // the zeroed line; rows inside the band are processed normally.
        for(y=0; y<ymin; y++)
        {
            ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, 0, channel_offset, y, 0);
        }
        for(; y<ymax; y++)
        {
            ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, local_pitch, channel_offset, y, 0);
        }
        for(; y<height; y++)
        {
            ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, 0, channel_offset, y, 0);
        }
    }
#endif
}
// Decode a sample from the input bitstream into the output frame buffer
bool DecodeSample(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams, CFHDDATA *cfhddata)
{
//CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
//CODEC_STATE *codec = &decoder->codec;
//int subband_wavelet_index[] = {5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 1, 1, 1, 0, 0, 0};
int channel_decodes = 1; // 3D Work
int channel_offset = 0;
int channel_mask = 0;
int channel_current = 0;
//int wavelet_index;
bool result = true;
uint8_t *local_output = output;
uint8_t *local_buffer = NULL;
int local_pitch = pitch;
int internal_format = decoder->frame.format;
int output_format = decoder->frame.output_format;
bool use_local_buffer = false;
DECODER *local_decoder = decoder;
//Frame_Region emptyFrameMask = {0};
Frame_Region emptyFrameMask = FRAME_REGION_INITIALIZER;
int orig_width = decoder->frame.width;
int orig_height = decoder->frame.height;
decoder->local_output = local_output; // used for NV12 decodes.
decoder->sample_uncompressed = 0; // set if a uncompressed sample is found.
decoder->image_dev_only = 0;
if(decoder->flags & (1<<3)) // This is an image development only decode.
{
decoder->sample_uncompressed = 1;
decoder->image_dev_only = 1;
decoder->codec.encoded_format = ENCODED_FORMAT_RGB_444;
decoder->codec.unique_framenumber = 0; //What should this be?
decoder->frame.white_point = 16; // how to we pass this in?
decoder->uncompressed_chunk = (uint32_t *)input->lpCurrentBuffer;
switch(output_format & 0x7fffffff)
{
case COLOR_FORMAT_RGB24:
decoder->uncompressed_size = orig_width * orig_height * 3;
break;
case COLOR_FORMAT_RGB32:
decoder->uncompressed_size = orig_width * orig_height * 4;
break;
case COLOR_FORMAT_RG48:
case COLOR_FORMAT_WP13:
decoder->uncompressed_size = orig_width * orig_height * 6;
break;
default:
decoder->uncompressed_size = orig_width * orig_height * 6;
assert(0);
break;
}
}
decoder->frame.alpha_Companded = 0; // reset this state.
if(decoder->parallelDecoder)
decoder->parallelDecoder->sample_uncompressed = 0;
decoder->error = CODEC_ERROR_OKAY;
input->error = BITSTREAM_ERROR_OKAY;
// first time through encoded_format is not initized.
if(input->nWordsUsed > 4096 && decoder->image_dev_only == 0) // an I-frame is needed
{
SAMPLE_HEADER header;
BITSTREAM input2;
InitBitstreamBuffer(&input2, input->lpCurrentWord, input->nWordsUsed, BITSTREAM_ACCESS_READ);
memset(&header, 0, sizeof(SAMPLE_HEADER));
header.find_lowpass_bands = 2; // help finding the uncompressed flag
if(ParseSampleHeader(&input2, &header))
{
decoder->codec.encoded_format = header.encoded_format;
decoder->sample_uncompressed = header.hdr_uncompressed;
if(decoder->parallelDecoder)
decoder->parallelDecoder->sample_uncompressed = header.hdr_uncompressed;
}
}
if((uintptr_t)input->lpCurrentBuffer & 0x3)
{
if(decoder->aligned_sample_buffer == NULL)
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
decoder->aligned_sample_buffer =
(uint8_t *)AllocAligned(allocator, (size_t)input->dwBlockLength, 16);
#else
decoder->aligned_sample_buffer =
(uint8_t *)MEMORY_ALIGNED_ALLOC(input->dwBlockLength, 16);
#endif
memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength);
decoder->aligned_sample_buffer_size = input->dwBlockLength;
}
else
{
if ((size_t)input->dwBlockLength <= decoder->aligned_sample_buffer_size)
{
memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength);
}
else
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
FreeAligned(decoder->allocator, decoder->aligned_sample_buffer);
decoder->aligned_sample_buffer =
(uint8_t *)AllocAligned(allocator, input->dwBlockLength, 16);
#else
MEMORY_ALIGNED_FREE(decoder->aligned_sample_buffer);
decoder->aligned_sample_buffer =
(uint8_t *)MEMORY_ALIGNED_ALLOC(input->dwBlockLength, 16);
#endif
memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength);
decoder->aligned_sample_buffer_size = input->dwBlockLength;
}
}
input->lpCurrentBuffer = decoder->aligned_sample_buffer;
input->lpCurrentWord = decoder->aligned_sample_buffer;
}
#if 0 // Test for missaligning the image data
if(((int)input->lpCurrentBuffer&3) == 0)
{
int i;
uint8_t *ptr = (uint8_t *)input->lpCurrentBuffer;
int missaligned = 1; //2 or 3
for(i=input->dwBlockLength-1; i>=0; i--)
ptr[i+missaligned] = ptr[missaligned];
input->lpCurrentBuffer = (uint8_t *)&ptr[missaligned];
input->lpCurrentWord = (uint8_t *)&ptr[missaligned];
}
#endif
//HACK
// Unfortunately I need color matrix data deep within the codec for RT playback.
if(cfhddata && cfhddata->MagicNumber == CFHDDATA_MAGIC_NUMBER) // valid input
{
if(decoder->cfhddata.MagicNumber != CFHDDATA_MAGIC_NUMBER)
{
//int size = cfhddata->size;
size_t size = cfhddata->size;
memset(&decoder->cfhddata, 0, sizeof(CFHDDATA));
if (size > sizeof(CFHDDATA)) {
// Limit the size to the known structure
size = sizeof(CFHDDATA);
}
memcpy(&decoder->cfhddata, cfhddata, size);
}
}
else
{
unsigned short value;
if(decoder->cfhddata.MagicNumber != CFHDDATA_MAGIC_NUMBER || decoder->cfhddata.size != sizeof(CFHDDATA))
{
memset(&decoder->cfhddata, 0, sizeof(CFHDDATA));
decoder->cfhddata.MagicNumber = CFHDDATA_MAGIC_NUMBER;
decoder->cfhddata.size = sizeof(CFHDDATA);
if(decoder->image_dev_only) // For baseband image only corrections, initize the decoder with defaults
{
decoder->cfhddata.cfhd_subtype = 2; //RGB
decoder->cfhddata.num_channels = 3;
}
else if(GetTuplet(input->lpCurrentBuffer, input->nWordsUsed, CODEC_TAG_INPUT_FORMAT, &value))
{
if(value == COLOR_FORMAT_RG48)
{
decoder->cfhddata.cfhd_subtype = 2; //RGB
decoder->cfhddata.num_channels = 3;
}
else if(value == COLOR_FORMAT_RG64)
{
decoder->cfhddata.cfhd_subtype = 3; //RGBA
decoder->cfhddata.num_channels = 4;
}
else if(value > COLOR_FORMAT_BAYER && value < COLOR_FORMAT_BAYER_END)
{
unsigned int format = BAYER_FORMAT_RED_GRN;
decoder->cfhddata.cfhd_subtype = 1; //BAYER
decoder->cfhddata.bayer_format = format; // default to Red-Grn
decoder->cfhddata.version = CFHDDATA_VERSION;
}
}
}
}
OverrideCFHDDATA(decoder, input->lpCurrentBuffer, input->nWordsUsed);
if(decoder->image_dev_only) // HACK we need to support 3D also.
decoder->source_channels = 1;
else
decoder->source_channels = decoder->real_channels = SkipVideoChannel(decoder, input, 0);
if(!decoder->basic_only && (decoder->cfhddata.MSChannel_type_value || decoder->cfhddata.MSCTV_Override))
{
//int channels = 0;
int channel_blend_type = BLEND_NONE;
int channel_swapped_flags = 0;
if(decoder->cfhddata.MSCTV_Override)
{
channel_mask = decoder->cfhddata.MSCTV_Override&0xff;
channel_blend_type = ((decoder->cfhddata.MSCTV_Override>>8) & 0xff);
channel_swapped_flags = ((decoder->cfhddata.MSCTV_Override>>16) & 0xffff);
}
else
{
channel_mask = decoder->cfhddata.MSChannel_type_value&0xff;
channel_blend_type = ((decoder->cfhddata.MSChannel_type_value>>8) & 0xff);
channel_swapped_flags = ((decoder->cfhddata.MSChannel_type_value>>16) & 0xffff);
}
if(channel_mask != 3)
{
channel_blend_type = BLEND_NONE;
channel_swapped_flags = 0;
}
//if(channels >= 2) // even "mono" files need to be displayed as Stereo if a 3D mode is selected //DAN20090302
{
if(channel_mask == 1 && decoder->source_channels >= 2) // Decode Left only
{
if(decoder->cfhddata.FramingFlags & 2) // channel swap
{
SkipVideoChannel(decoder, input, 2); // 3D work
}
}
else if(channel_mask == 2 && decoder->source_channels >= 2) // Decode Right only
{
if(decoder->cfhddata.FramingFlags & 2) // channel swap
{
SkipVideoChannel(decoder, input, 1); // 3D work
}
else
{
//assume second channel decode
SkipVideoChannel(decoder, input, 2); // 3D work
}
channel_current = 1;
channel_decodes = 1;
channel_blend_type = BLEND_NONE;
channel_swapped_flags = 0;
}
else if(channel_mask == 2 && decoder->source_channels <= 1) // Decode 2D as Right channel
{
channel_current = 1;
channel_decodes = 1;
channel_blend_type = BLEND_NONE;
channel_swapped_flags = 0;
}
else if((channel_mask&3) == 3) // A+B 3d work
{
channel_decodes = 2;
decoder->channel_mix_half_res = 0;
if(channel_blend_type != BLEND_NONE)
{
if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
{
//if(decoder->frame.format == DECODED_FORMAT_W13A)
// {
// decoder->frame.format = internal_format = DECODED_FORMAT_W13A;
// }
//else
//{
// decoder->frame.format = internal_format = DECODED_FORMAT_RG64;
// }
decoder->frame.format = internal_format = DECODED_FORMAT_RGB32;
local_pitch = decoder->frame.width * 4;
}
else
{
decoder->frame.format = internal_format = DECODED_FORMAT_RGB24;
local_pitch = decoder->frame.width * 3; //RGB24
}
/* if(decoder->frame.resolution == DECODED_RESOLUTION_FULL &&
(output_format == DECODED_FORMAT_YUYV ||
output_format == DECODED_FORMAT_UYVY))
{
if( channel_blend_type == BLEND_FREEVIEW ||
((channel_blend_type == BLEND_STACKED_ANAMORPHIC ||
channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC ||
channel_blend_type == BLEND_LINE_INTERLEAVED) && decoder->frame.width > 1280))
{
decoder->frame.resolution = DECODED_RESOLUTION_HALF;
decoder->channel_mix_half_res = 1;
decoder->frame.width /= 2;
decoder->frame.height /= 2;
local_pitch = (decoder->frame.width) * 3; //RGB24
}
} */
}
/* if(channel_blend_type == BLEND_STEREO_YUY2inRGBA) //YUY2 in RGBA
{
decoder->frame.format = internal_format = DECODED_FORMAT_YUYV;
local_pitch = decoder->frame.width * 2; //YUY2
channel_offset = local_pitch * (decoder->frame.height);
use_local_buffer = true;
}*/
/* DAN20120316 FLAG3D_HALFRES broken if(decoder->frame.resolution == DECODED_RESOLUTION_FULL && channel_swapped_flags & FLAG3D_HALFRES && output_format != DECODED_FORMAT_W13A)
{
decoder->frame.resolution = DECODED_RESOLUTION_HALF;
decoder->channel_mix_half_res = 1;
decoder->frame.width /= 2;
decoder->frame.height /= 2;
local_pitch /= 2;
} */
if( decoder->frame.resolution == DECODED_RESOLUTION_FULL &&
(channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC || channel_blend_type == BLEND_FREEVIEW))
{
if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
{
if(decoder->sample_uncompressed)
{
decoder->frame.resolution = DECODED_RESOLUTION_HALF;
decoder->channel_mix_half_res = 1;
decoder->frame.width /= 2;
decoder->frame.height /= 2;
local_pitch /= 2;
}
else
{
if(decoder->preformatted_3D_type > BLEND_NONE)
{
// leave as is.
}
else if(FORMAT8BIT(output_format))
{
decoder->frame.resolution = DECODED_RESOLUTION_HALF_HORIZONTAL;
decoder->frame.width /= 2;
local_pitch /= 2;
}
}
}
else
{
if(FORMAT8BIT(output_format))
decoder->frame.resolution = DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER;
}
//TODO int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed;
}
if(channel_blend_type >= BLEND_STACKED_ANAMORPHIC && channel_blend_type < BLEND_ANAGLYPH_RC)// stacked, side-by-side, fields, Onion, YUY2
{
channel_offset = local_pitch * (decoder->frame.height);
}
else if(channel_blend_type >= BLEND_ANAGLYPH_RC)
{
/* if(channel_blend_type & 1 && channel_blend_type <= 21) // B&W Anaglyph
{
//B&W using YUYV
decoder->frame.format = internal_format = DECODED_FORMAT_YUYV;
local_pitch = decoder->frame.width * 2; //YUY2
}*/
channel_offset = local_pitch * (decoder->frame.height);
use_local_buffer = true;
}
else if(channel_blend_type == BLEND_NONE) // double high
{
channel_offset = pitch * decoder->frame.height;
}
else
{
channel_blend_type = BLEND_STACKED_ANAMORPHIC;
channel_offset = pitch * (decoder->frame.height/2);
}
// fields, stacked, etc, only works on full or half res.
if (channel_blend_type > BLEND_NONE && channel_blend_type <= BLEND_LINE_INTERLEAVED &&
decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY) //thumnbail.
{
channel_decodes = 1;
channel_blend_type = BLEND_NONE;
channel_swapped_flags = 0;
}
if (channel_blend_type != BLEND_NONE &&
(output_format == DECODED_FORMAT_BYR1 ||
output_format == DECODED_FORMAT_BYR2 ||
output_format == DECODED_FORMAT_BYR3 ||
output_format == DECODED_FORMAT_BYR4 ))
{
channel_decodes = 1;
channel_blend_type = BLEND_NONE;
channel_swapped_flags = 0;
}
}
}
decoder->channel_decodes = channel_decodes;
decoder->channel_blend_type = channel_blend_type;
decoder->channel_swapped_flags = channel_swapped_flags;
}
else
{
decoder->channel_decodes = channel_decodes = 1;
decoder->channel_blend_type = BLEND_NONE;
decoder->channel_swapped_flags = 0;
}
if(cfhddata) // So the P-frames can know the bayerformat
{
//int size = cfhddata->size;
size_t size = cfhddata->size;
if (size > sizeof(CFHDDATA)) {
size = sizeof(CFHDDATA);
}
memcpy(cfhddata, &decoder->cfhddata, size);
}
{
bool doOrientation = true;
bool doFraming = true;
bool doBurins = true;
bool doImageflips = true;
bool doGhostBust = false;
bool doPrimaries = true;
int process_path_flags = decoder->cfhddata.process_path_flags;
int process_path_flags_mask = decoder->cfhddata.process_path_flags_mask;
if(decoder->basic_only)
{
doOrientation = false;
doFraming = false;
doBurins = false;
doImageflips = false;
doPrimaries = false;
}
else
{
if(decoder->cfhddata.process_path_flags_mask)
{
//DAN20101007 --
if(process_path_flags == 0)
decoder->cfhddata.process_path_flags = process_path_flags = decoder->cfhddata.process_path_flags_mask;
process_path_flags &= decoder->cfhddata.process_path_flags_mask;
if(process_path_flags_mask & PROCESSING_ACTIVE2)
{
if(!(process_path_flags_mask & PROCESSING_ORIENTATION))
doOrientation = false;
if(!(process_path_flags_mask & PROCESSING_FRAMING))
doFraming = false;
if(!(process_path_flags_mask & PROCESSING_BURNINS))
doBurins = false;
if(!(process_path_flags_mask & PROCESSING_IMAGEFLIPS))
doImageflips = false;
}
if(!(process_path_flags_mask & PROCESSING_COLORMATRIX))
doPrimaries = false;
}
if(process_path_flags & PROCESSING_ACTIVE2)
{
if(!(process_path_flags & PROCESSING_ORIENTATION))
doOrientation = false;
if(!(process_path_flags & PROCESSING_FRAMING))
doFraming = false;
if(!(process_path_flags & PROCESSING_BURNINS))
doBurins = false;
if(!(process_path_flags & PROCESSING_IMAGEFLIPS))
doImageflips = false;
if(!(process_path_flags & PROCESSING_COLORMATRIX))
doPrimaries = false;
}
}
if(doOrientation)
process_path_flags |= PROCESSING_ORIENTATION;
if(doFraming)
process_path_flags |= PROCESSING_FRAMING;
if(doBurins)
process_path_flags |= PROCESSING_BURNINS;
if(doImageflips)
process_path_flags |= PROCESSING_IMAGEFLIPS;
if(doPrimaries)
process_path_flags |= PROCESSING_COLORMATRIX;
if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if(decoder->ghost_bust_left || decoder->ghost_bust_right)
{
doGhostBust = true;
}
}
decoder->cfhddata.process_path_flags = process_path_flags;
if((!decoder->basic_only &&
(doOrientation && ( decoder->cfhddata.channel[0].FloatingWindowMaskL ||
decoder->cfhddata.channel[0].FloatingWindowMaskR ||
decoder->cfhddata.channel[0].FrameKeyStone ||
decoder->cfhddata.channel[0].FrameTilt ||
decoder->cfhddata.channel[0].HorizontalOffset ||
decoder->cfhddata.channel[0].VerticalOffset ||
decoder->cfhddata.channel[0].RotationOffset ||
decoder->cfhddata.channel[1].FloatingWindowMaskL ||
decoder->cfhddata.channel[1].FloatingWindowMaskR ||
decoder->cfhddata.channel[1].FrameKeyStone ||
decoder->cfhddata.channel[1].FrameTilt ||
decoder->cfhddata.channel[1].HorizontalOffset ||
decoder->cfhddata.channel[1].VerticalOffset ||
decoder->cfhddata.channel[1].RotationOffset ||
decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
decoder->cfhddata.channel[2].FloatingWindowMaskL ||
decoder->cfhddata.channel[2].FloatingWindowMaskR ||
decoder->cfhddata.channel[2].FrameKeyStone ||
decoder->cfhddata.channel[2].FrameTilt ||
decoder->cfhddata.channel[2].HorizontalOffset ||
decoder->cfhddata.channel[2].VerticalOffset ||
decoder->cfhddata.channel[2].RotationOffset ||
decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0)))
||
(doPrimaries && ( decoder->cfhddata.channel[0].user_blur_sharpen != 0.0 ||
decoder->cfhddata.channel[1].user_blur_sharpen != 0.0 ||
decoder->cfhddata.channel[2].user_blur_sharpen != 0.0))
||
(doFraming && ( decoder->cfhddata.channel[0].user_vignette_start != 0.0 ||
decoder->cfhddata.channel[1].user_vignette_start != 0.0 ||
decoder->cfhddata.channel[2].user_vignette_start != 0.0))
||
(doFraming && ( memcmp(&decoder->cfhddata.channel[0].FrameMask, &emptyFrameMask, 32) ||
decoder->cfhddata.FrameOffsetX ||
decoder->cfhddata.FrameOffsetY ||
decoder->cfhddata.FrameOffsetR ||
decoder->cfhddata.FrameHScale != 1.0 ||
decoder->cfhddata.FrameHDynamic != 1.0 ||
decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
decoder->cfhddata.channel[2].FrameZoom != 1.0))
||
(doGhostBust && (decoder->channel_blend_type == BLEND_NONE) && (channel_decodes == 2))
||
(doImageflips && decoder->cfhddata.channel_flip)
||
(decoder->preformatted_3D_type == BLEND_STACKED_ANAMORPHIC) ||
(decoder->preformatted_3D_type == BLEND_SIDEBYSIDE_ANAMORPHIC) ||
(decoder->channel_blend_type && decoder->frame.resolution == DECODED_RESOLUTION_QUARTER) || // 3D mode generally don't work in quarter res -- this prevents crashes.
( ((decoder->frame.width+7)/8)*8 != decoder->frame.width || (channel_decodes > 1 && decoder->channel_blend_type != BLEND_NONE) ||
decoder->sample_uncompressed) ||
(decoder->cfhddata.doMesh)
)
{
if( output_format == DECODED_FORMAT_BYR1 ||
output_format == DECODED_FORMAT_BYR2 ||
output_format == DECODED_FORMAT_BYR3 ||
output_format == DECODED_FORMAT_BYR4 )
{
// no manipulation should be applied
}
else
{
use_local_buffer = true;
local_pitch = ((decoder->frame.width+7)/8)*8 * 6; //RGB48
if(decoder->image_dev_only)
{
decoder->frame.white_point = 13;
decoder->frame.format = internal_format = DECODED_FORMAT_WP13;
}
else if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
{
decoder->frame.white_point = 13;
decoder->frame.format = internal_format = DECODED_FORMAT_W13A;
local_pitch = ((decoder->frame.width+7)/8)*8 * 8;
}
else
{
decoder->frame.white_point = 13;
decoder->frame.format = internal_format = DECODED_FORMAT_WP13;
}
if( decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL ||
decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
{
local_pitch *= 2; // need horizontal room to make 3D side by side frame
}
/*
if(output_format == DECODED_FORMAT_WP13 || output_format == DECODED_FORMAT_W13A)
{
// preserve HDR
decoder->frame.format = internal_format = output_format;//DECODED_FORMAT_WP13; // HDR output
if(output_format == DECODED_FORMAT_W13A)
local_pitch = decoder->frame.width * 8;
}
else
{
if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
{
decoder->frame.format = internal_format = DECODED_FORMAT_RG64;
local_pitch = decoder->frame.width * 8;
}
else
{
decoder->frame.format = internal_format = DECODED_FORMAT_RG48;
}
}*/
channel_offset = local_pitch * (decoder->frame.height);
}
}
}
if(output_format == DECODED_FORMAT_BYR4 && decoder->cfhddata.encode_curve_preset == 0)
{
if(decoder->BYR4LinearRestore == NULL)
{
int j,val;
int encode_curve_type = decoder->cfhddata.encode_curve >> 16;
//int encode_curve_neg = encode_curve_type & CURVE_TYPE_NEGATIVE;
float encode_curvebase;
if(encode_curve_type) //1 or 2
{
if(encode_curve_type & CURVE_TYPE_EXTENDED)
encode_curvebase = (float)(decoder->cfhddata.encode_curve & 0xffff); // use all 16-bits for larger log bases
else
encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff);
}
else
{
encode_curve_type = CURVE_TYPE_LOG;
encode_curvebase = 90.0;
}
#if _ALLOCATOR
decoder->BYR4LinearRestore = (unsigned short *)AllocAligned(decoder->allocator,16384*2, 16);
#else
decoder->BYR4LinearRestore = (unsigned short *)MEMORY_ALIGNED_ALLOC(16384*2, 16);
#endif
for(j=0; j<16384; j++) //0 to 1
{
switch(encode_curve_type & CURVE_TYPE_MASK)
{
case CURVE_TYPE_LOG:
val = (int)(CURVE_LOG2LIN((float)j/16384.0f,
(float)encode_curvebase) * 65535.0f);
break;
case CURVE_TYPE_GAMMA:
val = (int)(CURVE_GAM2LIN((float)j/16384.0f,
(float)encode_curvebase) * 65535.0f);
break;
case CURVE_TYPE_CINEON:
val = (int)(CURVE_CINEON2LIN((float)j/16384.0f,
(float)encode_curvebase) * 65535.0f);
break;
case CURVE_TYPE_CINE985:
val = (int)(CURVE_CINE9852LIN((float)j/16384.0f,
(float)encode_curvebase) * 65535.0f);
break;
case CURVE_TYPE_PARA:
val = (int)(CURVE_PARA2LIN((float)j/16384.0f,
(int)((decoder->cfhddata.encode_curve >> 8) & 0xff), (int)(decoder->cfhddata.encode_curve & 0xff)) * 65535.0f);
break;
case CURVE_TYPE_CSTYLE:
val = (int)(CURVE_CSTYLE2LIN((float)j/16384.0f,
(int)((decoder->cfhddata.encode_curve >> 8) & 0xff)) * 65535.0f);
break;
case CURVE_TYPE_SLOG:
val = (int)(CURVE_SLOG2LIN((float)j/16384.0f) * 65535.0f);
break;
case CURVE_TYPE_LOGC:
val = (int)(CURVE_LOGC2LIN((float)j/16384.0f) * 65535.0f);
break;
case CURVE_TYPE_LINEAR:
default:
val = j;
break;
}
if(val < 0) val = 0;
if(val > 65535) val = 65535;
decoder->BYR4LinearRestore[j] = val;
}
}
}
//DAN20120319 - removed
/*if(decoder->channel_mix_half_res) //decoding half but scaling to double the output size
{
local_pitch *= 2;
channel_offset = local_pitch * (decoder->frame.height*2);
}*/
if(use_local_buffer == true) // need buffer for anaglyph and other 3D presentation formats
{
int stereoframesize = channel_offset * channel_decodes/*stacked frames*/;
if(decoder->source_channels == 1 && decoder->preformatted_3D_type == BLEND_NONE)
stereoframesize = channel_offset;
if(channel_decodes == 1 && decoder->preformatted_3D_type != BLEND_NONE)
stereoframesize = channel_offset * 2;
if(channel_decodes == 2 && decoder->source_channels == 1 && decoder->channel_blend_type != BLEND_NONE)
stereoframesize = channel_offset * 2;
if(decoder->StereoBuffer==NULL || decoder->StereoBufferSize < stereoframesize)
{
#if _ALLOCATOR
if(decoder->StereoBuffer)
{
FreeAligned(decoder->allocator, decoder->StereoBuffer);
decoder->StereoBuffer = NULL;
}
decoder->StereoBuffer = (PIXEL16U *)AllocAligned(decoder->allocator, stereoframesize+256, 16); //DAN20130517 add 256, as 2.7K half we are write off the buffers end for zoom, don't know why yet.
#else
if(decoder->StereoBuffer)
{
MEMORY_ALIGNED_FREE(decoder->StereoBuffer);
decoder->StereoBuffer = NULL;
}
decoder->StereoBuffer = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(stereoframesize+256, 16); //DAN20130517 add 256, as 2.7K half we are write off the buffers end for zoom, don't know why yet.
#endif
assert(decoder->StereoBuffer != NULL);
if (! (decoder->StereoBuffer != NULL)) {
return CODEC_ERROR_MEMORY_ALLOC;
}
decoder->StereoBufferSize = stereoframesize;
}
decoder->StereoBufferFormat = internal_format;
local_buffer = (uint8_t *)decoder->StereoBuffer;
local_output = local_buffer;
}
DecodeEntropyInit(decoder);
//swapped -- Maybe useful for double height decodes.
/* if(channel_decodes == 2 && channel_swapped_flags & FLAG3D_SWAPPED)
{
local_output += channel_offset;
channel_offset = -channel_offset;
}*/
decoder->use_local_buffer = use_local_buffer ? 1 : 0;
if(channel_decodes == 2 && decoder->parallelDecoder == NULL && decoder->source_channels > 1)
{
int encoded_width = decoder->frame.width;
int encoded_height = decoder->frame.height;
if (decoder->frame.resolution == DECODED_RESOLUTION_HALF)
{
// Compute the encoded dimensions from the frame dimensions
encoded_width *= 2;
encoded_height *= 2;
}
else if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER)
{
// Compute the encoded dimensions from the frame dimensions
encoded_width *= 4;
encoded_height *= 4;
}
else if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
// Compute the encoded dimensions from the frame dimensions
encoded_width *= 2;
}
else if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_VERTICAL)
{
// Compute the encoded dimensions from the frame dimensions
encoded_height *= 2;
}
#if _ALLOCATOR
decoder->parallelDecoder = (DECODER *)Alloc(decoder->allocator, sizeof(DECODER));
if(decoder->parallelDecoder)
{
memset(decoder->parallelDecoder, 0, sizeof(DECODER));
DecodeInit(decoder->allocator, decoder->parallelDecoder, encoded_width, encoded_height,
internal_format, DECODED_RESOLUTION_FULL, NULL);
}
#else
decoder->parallelDecoder = (DECODER *)MEMORY_ALLOC(sizeof(DECODER));
if(decoder->parallelDecoder)
{
memset(decoder->parallelDecoder, 0, sizeof(DECODER));
decoder->parallelDecoder->thread_cntrl = decoder->thread_cntrl;
DecodeInit(decoder->parallelDecoder, encoded_width, encoded_height,
internal_format, DECODED_RESOLUTION_FULL, NULL);
}
#endif
}
// Using the parallel decoder?
if (decoder->parallelDecoder)
{
// Initialize the parallel decoder with parameters from the regular decoder
memcpy(&decoder->parallelDecoder->cfhddata, &decoder->cfhddata, sizeof(CFHDDATA));
memcpy(decoder->parallelDecoder->licensekey,decoder->licensekey, 16);
DecodeEntropyInit(decoder->parallelDecoder);
DecodeOverrides(decoder->parallelDecoder, decoder->overrideData, decoder->overrideSize);
decoder->parallelDecoder->channel_decodes = decoder->channel_decodes;
decoder->parallelDecoder->channel_blend_type = decoder->channel_blend_type;
decoder->parallelDecoder->flags = decoder->flags;
decoder->parallelDecoder->frame = decoder->frame;
decoder->parallelDecoder->use_local_buffer = use_local_buffer ? 1 : 0;
decoder->parallelDecoder->codec.encoded_format = decoder->codec.encoded_format;
if(decoder->parallelDecoder->decoder_thread.pool.thread_count == 0)
{
CreateLock(&decoder->parallelDecoder->decoder_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->parallelDecoder->decoder_thread.pool,
1, //
ParallelThreadProc,
decoder->parallelDecoder);
}
}
if(channel_decodes == 2 && decoder->real_channels > 1 && decoder->parallelDecoder && decoder->parallelDecoder->decoder_thread.pool.thread_count)
{
// Second stream as a thread.
BITSTREAM second_input = *input;
if(decoder->cfhddata.FramingFlags & 2 && decoder->source_channels >= 2) // channel swap
{
BITSTREAM leftEye_input = *input;
SkipVideoChannel(decoder, &leftEye_input, 2); // 3D work
*input = leftEye_input;
SkipVideoChannel(decoder, &second_input, 1); // 3D work
}
else
SkipVideoChannel(decoder, &second_input, 2); // 3D work
decoder->channel_current = 0;
decoder->parallelDecoder->channel_current = 1;
// Instead of reading the metadata databases again, use the ones in the main decoder
OverrideCFHDDATAUsingParent(decoder->parallelDecoder, decoder, input->lpCurrentBuffer, input->nWordsUsed);
// DAN20110404 Use left (first) eye metadata for both eyes (just in case right GUID is bad.)
// OverrideCFHDDATA(decoder->parallelDecoder, input->lpCurrentBuffer, input->nWordsUsed);
//OverrideCFHDDATA(decoder->parallelDecoder, second_input.lpCurrentWord, second_input.nWordsUsed);
// Hack, this gets lost
decoder->parallelDecoder->cfhddata.split_CC_position = decoder->cfhddata.split_CC_position;
#if (_THREADED && _GRAPHICS)
if(decoder->cfhddata.process_path_flags & PROCESSING_BURNINS && output)
{
if(decoder->cfhddata.BurninFlags & 3) // overlays / tools
{
DrawStartThreaded(decoder);
}
}
#endif
// Post a message to the mailbox
decoder->parallelDecoder->decoder_thread.input = &second_input;
if(use_local_buffer == false &&
(decoder->frame.format == DECODED_FORMAT_RGB32 || decoder->frame.format == DECODED_FORMAT_RGB24))
{
decoder->parallelDecoder->decoder_thread.output = local_output;
local_output += channel_offset;
}
else
{
decoder->parallelDecoder->decoder_thread.output = local_output + channel_offset;
}
decoder->parallelDecoder->decoder_thread.pitch = local_pitch;
decoder->parallelDecoder->decoder_thread.colorparams = colorparams;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->parallelDecoder->decoder_thread.pool, 1);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->parallelDecoder->decoder_thread.pool, THREAD_MESSAGE_START);
// do the first channel
{
TAGVALUE segment;
int sample_type;
#if _THREADED
decoder->entropy_worker_new.next_queue_num = 0;
decoder->entropy_worker_new.threads_used = 0;
#endif
// Get the type of sample
segment = GetTagValue(input);
assert(segment.tuple.tag == CODEC_TAG_SAMPLE);
if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE)) {
decoder->error = CODEC_ERROR_BITSTREAM;
STOP(tk_decompress);
return false;
}
sample_type = segment.tuple.value;
switch (sample_type)
{
case SAMPLE_TYPE_GROUP: // Group of frames (decode the first frame)
result = DecodeSampleGroup(decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_FRAME: // Decode the second or later frame in a group
result = DecodeSampleFrame(decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_IFRAME: // Decode a sample that represents an isolated frame
result = DecodeSampleIntraFrame(decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_SEQUENCE_HEADER:
// The video sequence header is ignored
result = true;
break;
default:
// Need to fill the output frame
//error = CODEC_ERROR_SAMPLE_TYPE;
result = false;
}
}
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->parallelDecoder->decoder_thread.pool);
}
else
{
while(channel_decodes > 0)
{
TAGVALUE segment;
int sample_type;
local_decoder->channel_current = channel_current++;
//OverrideCFHDDATA(local_decoder, input->lpCurrentBuffer, input->nWordsUsed);
#if (_THREADED && _GRAPHICS)
if(decoder->cfhddata.process_path_flags & PROCESSING_BURNINS && output)
{
if(decoder->cfhddata.BurninFlags & 3) //overlays / tools
{
DrawStartThreaded(decoder);
}
}
#endif
#if _THREADED
local_decoder->entropy_worker_new.next_queue_num = 0;
local_decoder->entropy_worker_new.threads_used = 0;
#endif
if(decoder->image_dev_only)
{
result = DecodeSampleIntraFrame(local_decoder, input, local_output, local_pitch, colorparams);
}
else
{
// Get the type of sample
segment = GetTagValue(input);
assert(segment.tuple.tag == CODEC_TAG_SAMPLE);
if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE)) {
local_decoder->error = CODEC_ERROR_BITSTREAM;
STOP(tk_decompress);
return false;
}
sample_type = segment.tuple.value;
switch (sample_type)
{
case SAMPLE_TYPE_GROUP: // Group of frames (decode the first frame)
result = DecodeSampleGroup(local_decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_FRAME: // Decode the second or later frame in a group
result = DecodeSampleFrame(local_decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_IFRAME: // Decode a sample that represents an isolated frame
result = DecodeSampleIntraFrame(local_decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_SEQUENCE_HEADER:
// The video sequence header is ignored
result = true;
break;
default:
// Need to fill the output frame
//error = CODEC_ERROR_SAMPLE_TYPE;
result = false;
}
}
if(ConvertPreformatted3D(decoder, use_local_buffer, internal_format, channel_mask, local_output, local_pitch, &channel_offset))
{
channel_decodes = 0;
}
else
{
channel_decodes--;
local_output += channel_offset;
if(decoder->parallelDecoder)
{
local_decoder = decoder->parallelDecoder;
}
}
}
}
if(use_local_buffer && output)
{
decoder->use_local_buffer = 0;
#if WARPSTUFF
WarpFrame(decoder, local_buffer, local_pitch, decoder->StereoBufferFormat);
MaskFrame(decoder, local_buffer, local_pitch, decoder->StereoBufferFormat);
#endif
ConvertLocalToOutput(decoder, output, pitch, output_format, local_buffer, local_pitch, abs(channel_offset));
}
else
{
#if WARPSTUFF
WarpFrame(decoder, output, pitch, output_format);
MaskFrame(decoder, output, pitch, output_format);
#endif
}
if(decoder->channel_mix_half_res) //HACK
{
decoder->frame.resolution = DECODED_RESOLUTION_FULL;
decoder->frame.width *= 2;
decoder->frame.height *= 2;
decoder->channel_mix_half_res = 0;
}
if( decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) //HACK
{
decoder->frame.resolution = DECODED_RESOLUTION_FULL;
decoder->frame.width *= 2;
}
if( decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) //HACK
{
decoder->frame.resolution = DECODED_RESOLUTION_FULL;
}
#if _GRAPHICS
if(decoder->cfhddata.process_path_flags & PROCESSING_BURNINS && output)
{
PaintFrame(decoder, output, pitch, output_format);
}
#endif
STOP(tk_decompress);
// Return indication of whether decoding succeeded or failed
return result;
}
// Decode a sample that encoded a group of frames (return the first frame)
//
// Parses tag value pairs from the bitstream, feeding each one through
// UpdateCodecState until the codec reports that the sample is done, then
// reconstructs the first frame of the group into the output buffer.
//
// Parameters:
//   decoder     - decoder instance owning the codec state and transforms
//   input       - bitstream positioned at the start of the group sample
//   output      - destination frame buffer (zeroed on failure)
//   pitch       - output row pitch in bytes
//   colorparams - color parameters (not referenced in this function; kept
//                 for signature parity with the other sample decoders)
//
// Returns true on success; false on a bitstream or codec error, in which
// case the output frame is cleared to zero.
bool DecodeSampleGroup(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams)
{
CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
CODEC_STATE *codec = &decoder->codec;
// Total output size in bytes; used only to clear the frame on failure
int32_t frame_size = decoder->frame.height * pitch;
int resolution = decoder->frame.resolution;
bool result = true;
// Map each of the 17 encoded subbands to its wavelet (by transform index)
// and to the band within that wavelet, in bitstream order
static int subband_wavelet_index[] = {5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 1, 1, 1, 0, 0, 0};
static int subband_band_index[] = {0, 1, 2, 3, 1, 2, 3, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3};
int num_subbands = sizeof(subband_wavelet_index)/sizeof(subband_wavelet_index[0]);
#if (0 && DEBUG)
// Force quarter resolution decoding for debug that feature
resolution = DECODED_RESOLUTION_QUARTER;
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decoding sample group\n");
}
#endif
START(tk_decoding);
// Initialize the codec state
InitCodecState(&decoder->codec);
// Allocate the transform data structure for the group of frames
AllocDecoderGroup(decoder);
// Initialize the tables for decoding the wavelet transforms
InitWaveletDecoding(decoder, subband_wavelet_index, subband_band_index, num_subbands);
// Clear the flags in the wavelet transforms
ClearTransformFlags(decoder);
// Process the tag value pairs until an encoded subband is found
for (;;)
{
TAGVALUE segment;
// Read the next tag value pair from the bitstream
//segment = GetTagValue(input);
segment = GetSegment(input);
assert(input->error == BITSTREAM_ERROR_OKAY);
if (input->error != BITSTREAM_ERROR_OKAY) {
decoder->error = CODEC_ERROR_BITSTREAM;
result = false;
break;
}
// Update the codec state with the information in the tag value pair
{
TAGWORD tag = segment.tuple.tag;
TAGWORD value = segment.tuple.value;
// Use the tag value pair to update the codec state
// (subband decoding happens inside this call as subband tags arrive)
error = UpdateCodecState(decoder, input, codec, tag, value);
assert(error == CODEC_ERROR_OKAY);
if (error != CODEC_ERROR_OKAY)
{
decoder->error = error;
result = false;
break;
//NOTE: Consider moving the error code into the codec state
}
}
// Check whether the group has been decoded
if (codec->sample_done) break;
// Skip the rest of the current channel?
if (CanSkipChannel(decoder, resolution))
{
// For YUV 4:2:2 output only three channels are needed; once the codec
// reaches a fourth channel, skip it and finish the sample entirely
if(codec->channel == 3 && (decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY))
{
int channel = codec->channel;
uint32_t channel_size = codec->channel_size[channel];
uint8_t *position = codec->channel_position + channel_size;
// Advance the bitstream to the next channel
SetBitstreamPosition(input, position);
// Reset the decoded subband flags (otherwise this code will be executed again)
codec->decoded_subband_flags = 0;
codec->num_channels = 3;
goto decoding_complete;
}
else
// Lowpass-only (thumbnail) decode: skip the highpass data for this
// channel without reconstructing any wavelets
if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
{
int channel = codec->channel;
uint32_t channel_size = codec->channel_size[channel];
uint8_t *position = codec->channel_position + channel_size;
// Advance the bitstream to the next channel
SetBitstreamPosition(input, position);
// Reset the decoded subband flags (otherwise this code will be executed again)
codec->decoded_subband_flags = 0;
}
else
{
// Compute the bitstream position after the current channel
int channel = codec->channel;
uint32_t channel_size = codec->channel_size[channel];
uint8_t *position = codec->channel_position + channel_size;
// Get the temporal wavelet
int temporal_index = 2;
TRANSFORM *transform = decoder->transform[channel];
IMAGE *wavelet = transform->wavelet[temporal_index];
#if (0 && DEBUG)
if (IsBandValid(wavelet, HIGHPASS_BAND))
{
int static count = 0;
if (count < 20) {
char label[_MAX_PATH];
sprintf(label, "Temporal-decode-%d-", count);
DumpBandPGM(label, wavelet, HIGHPASS_BAND, NULL);
}
count++;
}
#endif
#if _THREADED_DECODER
// Ready to invert this wavelet to get the lowpass band in the lower wavelet?
//if (DecodedBandsValid(wavelet, temporal_index))
// NOTE(review): the threaded build gates on resolution/encoded format
// rather than on AllBandsValid as the non-threaded build does below --
// presumably the worker threads validate bands themselves; confirm
if (resolution != DECODED_RESOLUTION_QUARTER || (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER))
#else
// Have all bands in the temporal wavelet been decoded?
//if (wavelet && BANDS_ALL_VALID(wavelet))
if (AllBandsValid(wavelet))
#endif
{
//PIXEL *buffer = (PIXEL *)decoder->buffer;
//size_t buffer_size = decoder->buffer_size;
int precision = codec->precision;
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Reconstructing the lowpass bands in the first level wavelets\n");
}
#endif
#if _THREADED_DECODER
// Add the temporal inverse transform to the processing queue
// (only if a worker thread pool actually exists)
if(decoder->entropy_worker_new.pool.thread_count)
{
ReconstructWaveletBand(decoder, transform, channel, wavelet, temporal_index,
precision, &decoder->scratch, 1);
QueueThreadedTransform(decoder, channel, temporal_index);
}
else
#endif
{
// Reconstruct the lowpass bands in the first level wavelets
//ReconstructWaveletBand(transform, channel, wavelet, temporal_index, precision, buffer, buffer_size);
ReconstructWaveletBand(decoder, transform, channel, wavelet, temporal_index,
precision, &decoder->scratch, 0 );
}
// Advance the bitstream to the next channel
SetBitstreamPosition(input, position);
// Reset the decoded subband flags (otherwise this code will be executed again)
codec->decoded_subband_flags = 0;
// Note that the subband flags are also reset when the channel header is decoded
}
// Was the wavelet created?
else if (wavelet == NULL)
{
// The temporal wavelet is not created during quarter resolution decoding
// Advance the bitstream to the next channel
SetBitstreamPosition(input, position);
// Reset the decoded subband flags (otherwise this code will be executed again)
codec->decoded_subband_flags = 0;
}
//TODO: Improve quarter resolution decoding so that the wavelet is created?
}
}
}
decoding_complete:
STOP(tk_decoding);
#if (0 && DEBUG)
if (logfile)
{
char label[_MAX_PATH];
int channel;
for (channel = 0; channel < codec->num_channels; channel++)
{
TRANSFORM *transform = decoder->transform[channel];
IMAGE *wavelet = transform->wavelet[2];
uint8_t *data = (uint8_t *)wavelet->band[HIGHPASS_BAND];
int height = wavelet->height;
int pitch = wavelet->pitch;
int size = height * pitch;
int band;
for (band = 0; band < wavelet->num_bands; band++)
{
sprintf(label, "Temporal channel: %d, band: %d", channel, band);
DumpBandStatistics(label, wavelet, band, logfile);
#if 0
sprintf(label, "Temporal-channel%d-band%d-", channel, band);
DumpBandPGM(label, wavelet, band, NULL);
#endif
}
assert(size > 0);
ZeroMemory(data, size);
}
}
#endif
if (result)
{
// Two frames have been decoded
decoder->gop_length = 2;
decoder->frame_count += 2;
#if (1 && DEBUG)
if (logfile) {
fprintf(logfile,
"DecodeSampleGroup, decoder: 0x%p, GOP length: %d\n",
decoder, decoder->gop_length);
}
#endif
// Return the first frame in the group
if (!decoder->no_output)
{
#if 0
// Decoding to quarter frame resolution at full frame rate?
if (resolution == DECODED_RESOLUTION_QUARTER)
{
int num_channels = codec->num_channels;
FRAME_INFO *info = &decoder->frame;
char *buffer = decoder->buffer;
size_t buffer_size = decoder->buffer_size;
uint8_t *frame1 = output;
uint8_t *frame2 = decoder->output2;
assert(frame2 != NULL);
// Reconstruct two frames at quarter resolution
ReconstructQuarterFrame(decoder, num_channels,
frame1, frame2, pitch,
info, buffer, buffer_size);
}
else
#endif
// Finish computing the output frame
ReconstructSampleFrameToBuffer(decoder, 0, output, pitch);
}
// Reconstruction reports failure through the decoder error field
if (decoder->error != CODEC_ERROR_OKAY) {
result = false;
}
#if TIMING
// Increment the count of bytes that have been decoded
decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
}
if (!result)
{
// Check that the frame can be cleared
assert(frame_size > 0);
if (frame_size > 0)
{
// Zero the frame
memset(output, 0, frame_size);
}
}
return result;
}
// Decode a sample that represents the second frame in a group
//
// Consumes the frame header tag value pairs from the bitstream and then
// reconstructs the appropriate frame from the previously decoded group
// into the output buffer. On any error the output frame is zeroed.
//
// Parameters:
//   decoder     - decoder instance holding the codec state and group data
//   input       - bitstream positioned at the start of the frame sample
//   output      - destination frame buffer
//   pitch       - output row pitch in bytes
//   colorparams - color parameters (not referenced in this function)
//
// Returns true on success, false on a bitstream or codec error.
bool DecodeSampleFrame(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams)
{
	CODEC_ERROR status = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *state = &decoder->codec;
	// Total output size in bytes; used only when clearing the frame on failure
	int32_t output_size = decoder->frame.height * pitch;
	bool result = true;

	START(tk_decoding);

	// Consume tag value pairs until the end of the frame header is reached
	while (true)
	{
		TAGWORD tag;
		TAGWORD value;
		TAGVALUE segment = GetSegment(input);

		assert(input->error == BITSTREAM_ERROR_OKAY);
		if (input->error != BITSTREAM_ERROR_OKAY)
		{
			decoder->error = CODEC_ERROR_BITSTREAM;
			result = false;
			break;
		}

		tag = segment.tuple.tag;
		value = segment.tuple.value;

		// Fold this tag value pair into the codec state
		status = UpdateCodecState(decoder, input, state, tag, value);
		assert(status == CODEC_ERROR_OKAY);
		if (status != CODEC_ERROR_OKAY)
		{
			decoder->error = status;
			result = false;
			break;
		}

		// The frame index tag marks the end of the frame header
		if (tag == CODEC_TAG_FRAME_INDEX) break;
	}

	STOP(tk_decoding);

#if (1 && DEBUG)
	if (logfile) {
		fprintf(logfile,
				"DecodeSampleFrame, decoder: 0x%p, GOP length: %d\n",
				decoder, decoder->gop_length);
	}
#endif

	if (result)
	{
		// Choose which frame of the group to display: the second frame
		// when a full group was decoded, otherwise fall back to the first
		int frame_index = -1;
		if (decoder->gop_length >= 2) {
			frame_index = 1;
		}
		else if (decoder->gop_length > 0) {
			frame_index = 0;
		}

		if (frame_index >= 0)
		{
			ReconstructSampleFrameToBuffer(decoder, frame_index, output, pitch);

			// Reconstruction reports failure through the decoder error field
			if (decoder->error != CODEC_ERROR_OKAY) {
				result = false;
			}
		}

#if TIMING
		// Increment the count of bytes that have been decoded
		decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
	}

	if (!result)
	{
		// Decoding failed; output a black frame if the size is known
		assert(output_size > 0);
		if (output_size > 0)
		{
			memset(output, 0, output_size);
		}
	}

	return result;
}
// Decode a sample that encodes an intra frame (a one-frame group that uses
// only the spatial wavelet transform).
//
// Reads tag/value segments from the bitstream and feeds them to
// UpdateCodecState (which performs the actual subband decoding as bands are
// encountered) until the sample is complete, then reconstructs the output
// frame into the caller's buffer.
//
// Parameters:
//   decoder     - decoder instance; decoder->error receives any error code
//   input       - bitstream positioned at the start of the sample payload
//   output      - destination frame buffer
//   pitch       - output row pitch in bytes
//   colorparams - NOTE(review): not referenced in this routine; presumably
//                 consumed by callees via the decoder state — confirm
//
// Returns true on success; on failure the output frame is zeroed (when the
// frame size is known) and false is returned.
bool DecodeSampleIntraFrame(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams)
{
CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
CODEC_STATE *codec = &decoder->codec;
// Total bytes in the output frame; used to clear the frame on failure
int32_t frame_size = decoder->frame.height * pitch;
int resolution = decoder->frame.resolution;
bool result = true;
// Map from subband number to the wavelet that contains it and to the band
// index within that wavelet (layout of the intra-frame spatial transform)
static int subband_wavelet_index[] = {2, 2, 2, 2, 1, 1, 1, 0, 0, 0};
static int subband_band_index[] = {0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3};
// NOTE(review): the subband count is taken from the wavelet index table (10
// entries) while the band index table has 16 entries — confirm the surplus
// entries are intentionally unused.
int num_subbands = sizeof(subband_wavelet_index)/sizeof(subband_wavelet_index[0]);
START(tk_decoding);
// Skip all bitstream decoding when only image development is required
if(decoder->image_dev_only) goto decoding_completeI;
// Initialize the codec state
InitCodecState(&decoder->codec);
// Allocate the transform data structure for the group of frames
AllocDecoderGroup(decoder);
// Initialize the tables for decoding the wavelet transforms
InitWaveletDecoding(decoder, subband_wavelet_index, subband_band_index, num_subbands);
// Clear the flags in the wavelet transforms
ClearTransformFlags(decoder);
//Force V210 output for debugging ***DEBUG***
//decoder->frame.format = DECODED_FORMAT_V210;
// Process the tag value pairs until an encoded subband is found
for (;;)
{
TAGVALUE segment;
// Read the next tag value pair from the bitstream
segment = GetSegment(input);
assert(input->error == BITSTREAM_ERROR_OKAY);
if (input->error != BITSTREAM_ERROR_OKAY) {
decoder->error = CODEC_ERROR_BITSTREAM;
result = false;
break;
}
{
TAGWORD tag = segment.tuple.tag;
TAGWORD value = segment.tuple.value;
// Use the tag value pair to update the codec state
// (this call also decodes subband data when a band tag is seen)
error = UpdateCodecState(decoder, input, codec, tag, value);
assert(error == CODEC_ERROR_OKAY);
if (error != CODEC_ERROR_OKAY) {
decoder->error = error;
result = false;
break;
//NOTE: Consider moving the error code into the codec state
}
}
// Check whether the group has been decoded
if (codec->sample_done) {
break;
}
// Skip the rest of the current channel?
if (CanSkipChannel(decoder, resolution))
{
// Alpha channel of a 4:2:2 YUV output can be skipped entirely
if(codec->channel == 3 && (decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY))
{
int channel = codec->channel;
uint32_t channel_size = codec->channel_size[channel];
uint8_t *position = codec->channel_position + channel_size;
// Advance the bitstream to the next channel
SetBitstreamPosition(input, position);
// Reset the decoded subband flags (otherwise this code will be executed again)
codec->decoded_subband_flags = 0;
codec->num_channels = 3;
goto decoding_completeI;
}
else if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
{
int channel = codec->channel;
uint32_t channel_size = codec->channel_size[channel];
uint8_t *position = codec->channel_position + channel_size;
// Advance the bitstream to the next channel
SetBitstreamPosition(input, position);
// Reset the decoded subband flags (otherwise this code will be executed again)
codec->decoded_subband_flags = 0;
}
else
{
// Compute the bitstream position after the current channel
int channel = codec->channel;
uint32_t channel_size = codec->channel_size[channel];
uint8_t *position = codec->channel_position + channel_size;
// Get the highest wavelet in the pyramid
int wavelet_index = 2;
TRANSFORM *transform = decoder->transform[channel];
IMAGE *wavelet = transform->wavelet[wavelet_index];
#if _THREADED_DECODER
// Ready to invert this wavelet to get the lowpass band in the lower wavelet?
//if (DecodedBandsValid(wavelet, temporal_index))
if (resolution != DECODED_RESOLUTION_QUARTER || (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER))
#else
// Have all bands in the wavelet been decoded?
if (AllBandsValid(wavelet))
#endif
{
//PIXEL *buffer = (PIXEL *)decoder->buffer;
//size_t buffer_size = decoder->buffer_size;
int precision = codec->precision;
#if (0 && DEBUG)
if (logfile) {
char label[_MAX_PATH];
int band;
sprintf(label, "Channel: %d, index: %d", channel, wavelet_index);
DumpImageStatistics(label, wavelet, logfile);
#if 1
for (band = 1; band < wavelet->num_bands; band++)
{
sprintf(label, "Channel: %d, index: %d, band: %d", channel, wavelet_index, band);
DumpBandStatistics(label, wavelet, band, logfile);
}
#endif
}
#endif
#if (0 & DEBUG)
if (logfile) {
fprintf(logfile, "Reconstructing the lowpass bands in the first level wavelets\n");
}
#endif
#if _THREADED_DECODER
// Add the inverse spatial transform to the processing queue
if(decoder->entropy_worker_new.pool.thread_count)
{
ReconstructWaveletBand(decoder, transform, channel, wavelet, wavelet_index,
precision, &decoder->scratch, 1);
QueueThreadedTransform(decoder, channel, wavelet_index);
}
else
#endif
{
// Reconstruct the lowpass bands in the first level wavelets
//ReconstructWaveletBand(transform, channel, wavelet, temporal_index, precision, buffer, buffer_size);
ReconstructWaveletBand(decoder, transform, channel, wavelet, wavelet_index,
precision, &decoder->scratch, 0);
}
// Advance the bitstream to the next channel
SetBitstreamPosition(input, position);
// Reset the decoded subband flags (otherwise this code will be executed again)
codec->decoded_subband_flags = 0;
// Note that the subband flags are also reset when the channel header is decoded
}
// Was the wavelet created?
//else if (wavelet == NULL)
else
{
// The wavelet may not have been created during quarter resolution decoding
// The wavelet should have been created if all bands are valid
assert(wavelet != NULL);
// Advance the bitstream to the next channel
SetBitstreamPosition(input, position);
// Reset the decoded subband flags (otherwise this code will be executed again)
codec->decoded_subband_flags = 0;
}
//TODO: Improve quarter resolution decoding so that the wavelet is created?
}
}
}
decoding_completeI:
STOP(tk_decoding);
if (result)
{
// One frame has been decoded
decoder->gop_length = 1;
decoder->frame_count += 1;
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile,
"DecodeSampleIntraFrame, decoder: 0x%p, GOP length: %d\n",
decoder, decoder->gop_length);
}
#endif
// Return the first frame (the only frame that was decoded)
if (!decoder->no_output)
{
int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed;
// Quarter resolution (non-Bayer, non-uncompressed) uses a dedicated
// conversion path instead of the full reconstruction
if ( !uncompressed && resolution == DECODED_RESOLUTION_QUARTER && (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER))
{
//CODEC_STATE *codec = &decoder->codec;
TRANSFORM **transform_array = decoder->transform;
int num_channels = codec->num_channels;
//int progressive = codec->progressive;
FRAME_INFO *info = &decoder->frame;
int precision = codec->precision;
#if _THREADED_DECODER
// Wait until the transform thread has finished all pending transforms
WaitForTransformThread(decoder);
#endif
ConvertQuarterFrameToBuffer(decoder, transform_array, num_channels, output, pitch, info, precision);
}
else
{
// Finish computing the output frame
ReconstructSampleFrameToBuffer(decoder, 0, output, pitch);
}
}
if (decoder->error != CODEC_ERROR_OKAY) {
result = false;
}
#if TIMING
// Increment the count of bytes that have been decoded
decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
}
if (!result)
{
// Check that the frame can be cleared
assert(frame_size > 0);
if (frame_size > 0)
{
// Zero the frame
memset(output, 0, frame_size);
}
}
return result;
}
// Decode a sample channel header and advance the codec state to the next
// channel.
//
// The channel header marks the boundary between channels in the sample.
// On success the next channel's transform is seeded from the previous
// channel's transform and the per-channel decoding state (subband counter
// and decoded-subband flags) is reset.  Returns false (with decoder->error
// set) if the header cannot be decoded.
bool DecodeSampleChannelHeader(DECODER *decoder, BITSTREAM *input)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	const int current_channel = codec->channel;
	const int next_channel = current_channel + 1;
	TRANSFORM *prev_transform = decoder->transform[current_channel];
	CHANNEL_HEADER header;
	CODEC_ERROR error;

	// Decode the remainder of the channel header from the bitstream
	error = DecodeChannelHeader(input, &header, SAMPLE_TYPE_CHANNEL);
	assert(error == CODEC_ERROR_OKAY);
	decoder->error = error;
	if (error != CODEC_ERROR_OKAY) {
		return false;
	}

	// The decoder cannot skip channels, so the header must name the channel
	// that immediately follows the one just decoded
	assert(header.channel == next_channel);

	// Seed the transform for the new channel from the previous channel
	InitChannelTransform(decoder->transform[next_channel], prev_transform);

	// Update the codec state for decoding the new channel
	codec->channel = next_channel;
	codec->band.subband = 0;			// restart the subband counter
	codec->decoded_subband_flags = 0;	// no subbands decoded yet in this channel

	return true;
}
// Decode the coefficients in a subband.
//
// Dispatches on the subband number: 255 is an empty (temporal highpass)
// band, positive numbers are highpass bands, and zero is the lowpass band.
// The destination wavelet is (re)allocated as needed, the band is decoded,
// and — once every band of the wavelet has been decoded (or started, in the
// threaded decoder) — the inverse wavelet transform is applied or queued to
// reconstruct the next lower wavelet level.
//
// Parameters:
//   decoder - decoder instance (supplies codec state and transforms)
//   input   - bitstream positioned at the band payload
//   subband - subband number from the bitstream (0, 1..N, or 255)
//
// Returns true if the band was decoded successfully.
bool DecodeSampleSubband(DECODER *decoder, BITSTREAM *input, int subband)
{
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
CODEC_STATE *codec = &decoder->codec;
int channel = codec->channel;
TRANSFORM *transform = decoder->transform[channel];
int *subband_wavelet_index = decoder->subband_wavelet_index;
// Used for quarter resolution and threaded decoding
int transform_type = transform->type;
// Wavelet parameters
int width;
int height;
int level;
int type;
int band;
int threading = 1;
// Wavelet containing the band to decode
int index;
IMAGE *wavelet = NULL;
bool result;
// Subbands 7-10 of the fieldplus transform must be decoded serially
if(subband >= 7 && subband <= 10 && transform_type == TRANSFORM_TYPE_FIELDPLUS)
threading = 0;
// Update the transform data structure from the codec state
UpdateCodecTransform(transform, codec);
// Is this an empty band?
if (subband == 255)
{
// Decode an empty band
// This wavelet is the temporal wavelet
index = 2;
wavelet = transform->wavelet[index];
// Get the wavelet parameters decoded from the bitstream
width = codec->band.width;
height = codec->band.height;
level = codec->highpass.wavelet_level;
type = codec->highpass.wavelet_type;
band = codec->band.number;
// The empty band should be the highpass band in a temporal wavelet
assert(type == WAVELET_TYPE_TEMPORAL && band == 1);
#if _THREADED_DECODER
// Allocate (or reallocate) the wavelet with thread safety
wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
// Allocate (or reallocate) the wavelet
#if _ALLOCATOR
wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
// Save this wavelet in the transform data structure
transform->wavelet[index] = wavelet;
#endif
// Set the wavelet parameters
wavelet->pixel_type[band] = PIXEL_TYPE_16S;
wavelet->num_bands = 2;
result = DecodeSampleEmptyBand(decoder, input, wavelet, band);
// Set the subband number for the next band expected in the bitstream
// (the empty temporal band is logically subband 10, so 11 comes next)
codec->band.subband = 11;
}
// Is this a highpass band?
else if (subband > 0)
{
// Decode a highpass band
// Get the wavelet that contains this subband
index = subband_wavelet_index[subband];
wavelet = transform->wavelet[index];
// Get the wavelet parameters decoded from the bitstream
width = codec->band.width;
height = codec->band.height;
level = codec->highpass.wavelet_level;
type = codec->highpass.wavelet_type;
band = codec->band.number;
#if _THREADED_DECODER
// Allocate (or reallocate) the wavelet with thread safety
wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
// Allocate (or reallocate) the wavelet
#if _ALLOCATOR
wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
// Save this wavelet in the transform data structure
transform->wavelet[index] = wavelet;
#endif
result = DecodeSampleHighPassBand(decoder, input, wavelet, band, threading);
if (result)
{
// Call thread safe routine to update the band valid flags
UpdateWaveletBandStartedFlags(decoder, wavelet, band);
}
// Reset the default encoding method
codec->band.encoding = BAND_ENCODING_RUNLENGTHS;
// Set the subband number for the next band expected in the bitstream
codec->band.subband = subband + 1;
}
else
{
// Decode a lowpass band
// Get the wavelet that contains this subband
index = subband_wavelet_index[0];
wavelet = transform->wavelet[index];
// Get the wavelet parameters decoded from the bitstream
width = codec->lowpass.width;
height = codec->lowpass.height;
level = codec->lowpass.level;
type = codec->first_wavelet;
//band = codec->band.number;
band = 0;
#if _THREADED_DECODER
// Allocate (or reallocate) the wavelet with thread safety
wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
// Allocate (or reallocate) the wavelet
#if _ALLOCATOR
wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
// Save this wavelet in the transform data structure
transform->wavelet[index] = wavelet;
#endif
// The lowpass data is always stored in wavelet band zero
assert(band == 0);
// The lowpass band must be subband zero
assert(subband == 0);
result = DecodeSampleLowPassBand(decoder, input, wavelet);
if (result)
{
// Call thread safe routine to update the band valid flags
UpdateWaveletBandValidFlags(decoder, wavelet, band);
}
// Set the subband number for the next band expected in the bitstream
codec->band.subband = subband + 1;
}
// Was the subband successfully decoded?
if (result)
{
// The transform will set the band valid flag if this is the temporal wavelet
//if (index != 2)
// Record that this subband has been decoded successfully
if (0 <= subband && subband <= CODEC_MAX_SUBBAND)
codec->decoded_subband_flags |= DECODED_SUBBAND_MASK(subband);
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decoded subband: %d, wavelet: %d, channel: %d\n",
subband, index, channel);
}
#endif
}
#if _THREADED_DECODER
// Ready to queue a threaded transform to invert this wavelet?
if (BANDS_ALL_STARTED(wavelet))
{
// Are frames being decoded to quarter resolution?
if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER && (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER))
{
// Smallest spatial wavelet above the lowpass temporal band (fieldplus transform)
int highest_index = 5;
if (transform_type == TRANSFORM_TYPE_SPATIAL)
{
// Smallest wavelet in the spatial transform
highest_index = 2;
}
// Only the smallest spatial wavelet must be reconstructed
if (index != highest_index) {
return result;
}
//TODO: Can we improve on the current scheme for quarter resolution decoding?
}
if ((transform->type == TRANSFORM_TYPE_SPATIAL && index > 0) || index >= 2)
{
if(decoder->entropy_worker_new.pool.thread_count && threading)
{
ReconstructWaveletBand(decoder, transform, codec->channel, wavelet, index,
codec->precision, &decoder->scratch, 1);
// Add the inverse wavelet transform to the processing queue
QueueThreadedTransform(decoder, codec->channel, index);
}
else
{
// Apply the inverse wavelet transform to reconstruct the lower level wavelet
ReconstructWaveletBand(decoder, transform, codec->channel, wavelet, index,
codec->precision, &decoder->scratch, 0);
}
}
}
#else
// Ready to invert this wavelet to get the lowpass band in the lower wavelet?
if (BANDS_ALL_VALID(wavelet))
{
int channel = codec->channel;
//PIXEL *buffer = (PIXEL *)decoder->buffer;
//size_t buffer_size = decoder->buffer_size;
int precision = codec->precision;
#if (0 && DEBUG)
if (logfile) {
char label[_MAX_PATH];
int band;
sprintf(label, "Channel: %d, index: %d", channel, index);
DumpImageStatistics(label, wavelet, logfile);
#if 1
for (band = 1; band < wavelet->num_bands; band++)
{
sprintf(label, "Channel: %d, index: %d, band: %d", channel, index, band);
DumpBandStatistics(label, wavelet, band, logfile);
}
#endif
}
#endif
// Are frames being decoded to quarter resolution?
if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER && (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER))
{
// Smallest spatial wavelet above the lowpass temporal band (fieldplus transform)
int highest_index = 5;
if (transform_type == TRANSFORM_TYPE_SPATIAL)
{
// Smallest wavelet in the spatial transform
highest_index = 2;
}
// Only the smallest spatial wavelet must be reconstructed
if (index != highest_index) {
return result;
}
//TODO: Can we improve on the current scheme for quarter resolution decoding?
}
// Apply the inverse wavelet transform to reconstruct the lower level wavelet
ReconstructWaveletBand(decoder, transform, channel, wavelet, index, precision, &decoder->scratch, 0);
}
#endif
return result;
}
// Decode the coefficients in a lowpass band.
//
// Reads the lowpass coefficients for the current channel from the bitstream
// into band zero of the destination wavelet, applying the encoder's inverse
// quantization and a precision/format-dependent channel offset.  Three paths:
//   1. 16-bit packed path (two coefficients per 32-bit big-endian word), with
//      an optional solid-color shortcut signaled by a 0xFFFFFFFF marker;
//   2. 8-bit byte path (coefficients scaled up to the 16-bit format);
//   3. generic GetBits path for any other bit depth.
// The first two paths require a word-aligned bitstream (nBitsFree equal to
// BITSTREAM_BUFFER_SIZE).
//
// Returns true unconditionally (result is never cleared in this routine).
bool DecodeSampleLowPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet)
{
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
CODEC_STATE *codec = &decoder->codec;
int channel = codec->channel;
bool result = true;
int lowpass_width; // Lowpass band dimensions
int lowpass_height;
int lowpass_pitch;
PIXEL *pLowPassRow; // Pointer into the lowpass band
//int wavelet_width; // Dimensions of the wavelet image
//int wavelet_height;
int bits_per_pixel;
int quantization;
int offset;
//int pixel_divisor = (1 << (2 * codec->lowpass.level));
int row, column;
// -1 means "no solid color"; otherwise both 16-bit halves hold the color
int32_t solid_color = -1;
const int gain = 128;
const int colorshift = 0;
// int channelgain[4];
//int waterrow=19, watercol=214;
//int cspace = decoder->frame.colorspace;
// Lowpass image dimensions may be smaller than the wavelet dimensions
// because the encoder may have transmitted an image without the border
lowpass_width = codec->lowpass.width;
lowpass_height = codec->lowpass.height;
lowpass_pitch = wavelet->pitch/sizeof(PIXEL);
pLowPassRow = wavelet->band[0];
// Get the parameters for quantization performed by the encoder
quantization = codec->lowpass.quantization;
offset = codec->lowpass.pixel_offset;
bits_per_pixel = codec->lowpass.bits_per_pixel;
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decode lowpass subband\n");
}
#endif
// Fast path: 16-bit coefficients, word-aligned stream, even width
if (bits_per_pixel == 16 && stream->nBitsFree == BITSTREAM_BUFFER_SIZE && !(lowpass_width&1))
{
int32_t *lpCurrentLong = (int32_t *)stream->lpCurrentWord;
//int signval = 0;
//int channel3stats = 0;
// Precision/format-dependent offset that aligns decoded levels with the
// values produced by the corresponding uncompressed path
int channeloffset = 0;
if(decoder->codec.precision == 8)
{
channeloffset = (codec->num_frames==2 ? 64 : 32);
}
else if(decoder->codec.precision == 10)
{
switch(decoder->frame.format)
{
case DECODED_FORMAT_YU64:
case DECODED_FORMAT_YR16:
case DECODED_FORMAT_V210:
channeloffset = codec->num_frames==2 ? 14 : 4;//DAN20090601, recal I-frame DAN20110301
break;
default:
channeloffset = codec->num_frames==2 ? 48 : 24;//DAN20090601
}
if(decoder->sample_uncompressed) //DAN20110301 was testing the GOP length for this (why?)
channeloffset = 0; //DAN20100822 -- Prevent offset between uncompressed V210 and compressed frames
}
else if(decoder->codec.precision == 12)
{
switch(decoder->frame.format)
{
case DECODED_FORMAT_RGB24:
case DECODED_FORMAT_RGB24_INVERTED:
case DECODED_FORMAT_RGB32:
case DECODED_FORMAT_RGB32_INVERTED:
channeloffset = 8; //DAN200906010
break;
// 16-bit precision:
case DECODED_FORMAT_RG48:
case DECODED_FORMAT_RG64:
case DECODED_FORMAT_B64A:
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_W13A:
channeloffset = 0;
break;
case DECODED_FORMAT_RG30:
case DECODED_FORMAT_R210:
case DECODED_FORMAT_DPX0:
case DECODED_FORMAT_AR10:
case DECODED_FORMAT_AB10:
channeloffset = 6; //DAN200906010 //DAN20100822 -- prefect for uncompressed to compressed.
break;
default:
channeloffset = 0;
break;
}
}
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) //DAN20090728 -- Prevent offset between uncompressed and compressed RAW frames
channeloffset = 0;
#define DUMPLL 0
#if (_DEBUG && DUMPLL)
FILE *fp;
if(channel == 0)
{
static int inc = 1;
char name[256];
sprintf(name,"C:\\Cedoc\\LLdec%03d.pgm", inc++);
fp = fopen(name,"w");
fprintf(fp, "P2\n# CREATOR: DAN\n%d %d\n255\n", lowpass_width, lowpass_height);
}
#endif
#if LOSSLESS
channeloffset = 0; //LOSSLESS
#endif
// A leading all-ones word followed by the band dimensions signals a
// solid-color band: one color value replaces the per-pixel data
//if(lpCurrentLong[0] == 0xffffffff)
if(lpCurrentLong[0] == (int32_t)UINT32_MAX)
{
if(SwapInt32BtoN(lpCurrentLong[2]) == (uint32_t)lowpass_width)
{
if(SwapInt32BtoN(lpCurrentLong[3]) == (uint32_t)lowpass_height)
{
solid_color = SwapInt32BtoN(lpCurrentLong[1]);
solid_color |= (solid_color<<16);
lpCurrentLong += 4;
}
}
}
// Decode each row in the lowpass image
for (row = 0; row < lowpass_height; row++)
{
// Holds the 32-bit word read on even columns; the low half is
// consumed on the following odd column
int pixels;
// Start at the first column
column = 0;
// Process the rest of the row
{
for (; column < lowpass_width; column++)
{
int pixel_value;
//int i;
// Perform inverse quantization
if(column & 1)
{
// Odd column: use the low 16 bits saved from the previous word
pixel_value = pixels;
}
else
{
// Even column: fetch the next big-endian word (or the solid color)
//pixels = _bswap(*(lpCurrentLong++));
if(solid_color == -1)
pixels = SwapInt32BtoN(*(lpCurrentLong++));
else
pixels = solid_color;
pixel_value = (pixels>>16);
// Sign-extend the low 16 bits for the next (odd) column
pixels <<= 16;
pixels >>= 16;
}
// Store the pixel in the lowpass band of the wavelet
pixel_value += channeloffset;
// pixel_value -= 64;
// pixel_value += ((rand() & 0x7fff) - 0x4000);
// if(pixel_value < 0) pixel_value = 0;
if(pixel_value > 0x7fff) pixel_value = 0x7fff;
pLowPassRow[column] = pixel_value;
#if (_DEBUG && DUMPLL)
if(channel==0 && fp)
fprintf(fp, "%d\n", pixel_value>>7);
#endif
}
}
// Advance to the next row in the lowpass image
pLowPassRow += lowpass_pitch;
}
#if (_DEBUG && DUMPLL)
if(channel == 0 && fp)
fclose(fp);
#endif
#if ERROR_TOLERANT
// Update the count of bytes used
stream->nWordsUsed -= (int)(((intptr_t)lpCurrentLong - (intptr_t)stream->lpCurrentWord));
#endif
// Update the bitstream
stream->lpCurrentWord = (uint8_t *)lpCurrentLong;
}
// 8-bit path: one coefficient per byte, scaled up to the 16-bit format
else if (bits_per_pixel == 8 && stream->nBitsFree == BITSTREAM_BUFFER_SIZE)
{
uint8_t *lpCurrentByte = (uint8_t *)stream->lpCurrentWord;
//int signval = 0;
// Decode each row in the lowpass image
for (row = 0; row < lowpass_height; row++)
{
// Start at the first column
column = 0;
// Process the rest of the row
for (; column < lowpass_width; column++)
{
int pixel_value = *(lpCurrentByte++);
// Perform inverse quantization
#if _ENCODE_CHROMA_ZERO
if (channel == 0)
pixel_value = (quantization * pixel_value) + offset;
else
pixel_value = (pixel_value - offset) * quantization;
#else
pixel_value = (quantization * pixel_value) + offset;// + colorshift;
#endif
// Apply the gain about the mid-level (gain/128 is the scale factor)
pixel_value -= 128 * quantization;
pixel_value *= gain;
pixel_value >>= 7;
pixel_value += 128 * quantization;
pixel_value += colorshift;
// Store the pixel in the lowpass band of the wavelet
// Multiply by 16 to turn 8-bit into the new 16-bit format
pLowPassRow[column] = pixel_value * 16;
}
// Advance to the next row in the lowpass image
pLowPassRow += lowpass_pitch;
}
#if ERROR_TOLERANT
// Update the count of bytes used
stream->nWordsUsed -= (int)(((intptr_t)lpCurrentByte - (intptr_t)stream->lpCurrentWord));
#endif
// Update the bitstream
stream->lpCurrentWord = (uint8_t *)lpCurrentByte;
}
// Generic path: read each coefficient with GetBits
else
{
int channeloffset = 0;
if(decoder->codec.precision == 8)
{
channeloffset = (codec->num_frames==2 ? 64 : 32);
}
else if(decoder->codec.precision == 10)
{
channeloffset = (codec->num_frames==2 ? 10 : 5);
}
else if(decoder->codec.precision == 12)
{
// channeloffset = (codec->num_frames==2 ? 4 : 2); // Seems to result in less shift using the viper images
}
//DAN20050923 no longer trying to compensate for YUV to RGB issues.
if(decoder->frame.format == DECODED_FORMAT_RGB24 || decoder->frame.format == DECODED_FORMAT_RGB32)
{
if(decoder->codec.precision == 8)
{
switch(channel)
{
case 0: channeloffset += 8; break; // fixed rounding error introduced by YUV->RGB
case 1: channeloffset += 16; break;
case 2: channeloffset += 10; break;
}
}
else if(decoder->codec.precision == 10)
{
switch(channel)
{
case 0: channeloffset += -8; break; // fixed rounding error introduced by YUV->RGB
case 1: channeloffset += -4; break;
case 2: channeloffset += -4; break;
}
}
else if(decoder->codec.precision == 12)
{
switch(channel)
{
case 0: channeloffset += 0; break; // fixed rounding error introduced by YUV->RGB
case 1: channeloffset += 0; break;
case 2: channeloffset += 0; break;
}
}
}
if(bits_per_pixel != 16)
channeloffset = 0;
for (row = 0; row < lowpass_height; row++)
{
for (column = 0; column < lowpass_width; column++) {
int pixel_value = GetBits(stream, bits_per_pixel);
// Perform inverse quantization
#if _ENCODE_CHROMA_ZERO
if (channel == 0)
pixel_value = (quantization * pixel_value) + offset;
else
pixel_value = (pixel_value - offset) * quantization;
#else
pixel_value = (quantization * pixel_value) + offset;// + colorshift;
#endif
// Store the pixel in the lowpass band of the wavelet
pLowPassRow[column] = SATURATE(pixel_value + channeloffset); // DAN20050926 added chromaoffet to match the normal path -- this code will be used for SD (720) encodes
}
stream->nWordsUsed -= lowpass_width*(bits_per_pixel>>3);
// Advance to the next row in the lowpass image
pLowPassRow += lowpass_pitch;
}
}
// Set the wavelet scale factor
wavelet->scale[0] = quantization;
// Align the bitstream to the next tag value pair
AlignBitsTag(stream);
// Return indication of lowpass decoding success
return result;
}
// Decode the coefficients in a highpass band.
//
// Copies the encoder's scale and quantization parameters into the wavelet
// band, then dispatches on the band encoding method: lossless FSM decode,
// raw 16-bit coefficients, or (the default) runlength decoding.  After the
// coefficients, the band trailer is decoded and verified.
//
// Parameters:
//   decoder   - decoder instance (codec state supplies band parameters)
//   stream    - bitstream positioned at the band payload
//   wavelet   - destination wavelet (receives scale/quantization and data)
//   band      - band index within the wavelet
//   threading - nonzero to allow threaded runlength decoding
//
// Returns true if the coefficients and the band trailer were decoded.
//
// NOTE(review): the disabled debug/skip blocks below reference `header`
// and `subband`, which are commented out in this revision — they would not
// compile if re-enabled.
bool DecodeSampleHighPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band, int threading)
{
CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
CODEC_STATE *codec = &decoder->codec;
//int channel = codec->channel;
//int subband = codec->band.subband;
//int index = codec->highpass.wavelet_number;
int width;
int height;
int quantization;
// The encoder may not have used variable-length coding
int method = codec->band.encoding;
bool result = true;
// Check that the band index is in range
assert(0 <= band && band <= codec->max_subband);
// Encoded coefficients start on a tag boundary
AlignBitsTag(stream);
#if (0 && DEBUG)
// Dump the band header to the logfile
if (logfile) {
fprintf(logfile,
"Band header marker: 0x%04X, subband: %d, width: %d, height: %d, encoding: %d\n",
header->marker, header->subband, header->width, header->height, header->encoding);
}
#endif
// Copy the scale factors used by the encoder into the wavelet band
// (Zero means that the encoder did not supply this parameter)
if (codec->band.scale > 0) {
wavelet->scale[band] = codec->band.scale;
}
// Get the quantization factor that was used to encode the band coefficients
quantization = codec->band.quantization;
// Copy the quantization into the wavelet
wavelet->quantization[band] = quantization;
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decode highpass subband: %d, quantization: %d\n", subband, quantization);
}
#endif
// Get the highpass band dimensions
width = codec->band.width;
height = codec->band.height;
// Is this a special band for the temporal high pass thumbnail?
if (method == BAND_ENCODING_LOSSLESS)
{
//lossless temporal subband //DAN20060701
result = DecodeBand16sLossless(decoder, stream, wavelet, band, width, height);
assert(result);
if (result)
{
// Call thread safe routine to update the band valid flags
UpdateWaveletBandValidFlags(decoder, wavelet, band);
}
}
else if (method == BAND_ENCODING_16BIT)
{
//lossless temporal subband //DAN20060701
result = DecodeBand16s(decoder, stream, wavelet, band, width, height);
assert(result);
if (result)
{
// Call thread safe routine to update the band valid flags
UpdateWaveletBandValidFlags(decoder, wavelet, band);
}
}
else
{
// Must use the runlength encoding method
assert(codec->band.encoding == BAND_ENCODING_RUNLENGTHS);
#if 0
// This code attempts to not decode various subbands for 1/4 res decodes.
// Unfortunately playback would stop after 5 seconds with this code (but not in debug mode.)
if (subband >= 4 && subband <= 6)
{
TAGVALUE segment;
AlignBitsTag(stream);
do
{
segment = GetTagValue(stream);
}
while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER);
stream->lpCurrentWord -= 4;
stream->nWordsUsed += 4;
}
else
#elif 0
// Is this subband required for decoding the frame?
if (CanSkipSubband(decoder, subband))
{
// Skip past the end of this subband
SkipSubband(stream);
}
#endif
// Decode this subband
result = DecodeFastRunsFSM16s(decoder, stream, wavelet, band, width, height, threading);
}
// Return failure if a problem was encountered while reading the band coefficients
if (!result) return result;
// The encoded band coefficients end on a bitstream word boundary
// to avoid interference with the marker for the coefficient band trailer
AlignBits(stream);
// Decode the band trailer
error = DecodeBandTrailer(stream, NULL);
decoder->error = error;
assert(error == CODEC_ERROR_OKAY);
if (error != CODEC_ERROR_OKAY) {
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Error in band %d trailer: %d\n", band, error);
}
#endif
return false;
}
return result;
}
// Decode an empty band (a band with no encoded coefficients).
//
// An empty band carries only its scale/quantization parameters and a band
// trailer; no coefficient data is read.  The parameters from the codec
// state are recorded in the wavelet and the trailer is decoded and checked.
// Returns false (with decoder->error set) if the trailer is invalid.
bool DecodeSampleEmptyBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	CODEC_ERROR error;

	// The band index must identify one of the highpass bands
	assert(0 <= band && band <= CODEC_MAX_HIGHBANDS);

	// Empty bands are only used with 16-bit highpass coefficients
	assert(wavelet->pixel_type[1] == PIXEL_TYPE_16S);

	// Encoded coefficients must start on a word boundary
	AlignBits(stream);

	// Record the scale factor used by the encoder
	// (zero means that the encoder did not supply the parameter)
	if (codec->band.scale > 0) {
		wavelet->scale[band] = codec->band.scale;
	}

	// Record the quantization used to encode the band coefficients
	wavelet->quantization[band] = codec->band.quantization;

	// Decode the band trailer
	error = DecodeBandTrailer(stream, NULL);
	decoder->error = error;
	assert(error == CODEC_ERROR_OKAY);
	if (error != CODEC_ERROR_OKAY) {
		return false;
	}

	// The encoded band ends on a bitstream word boundary to avoid
	// interference with the marker for the coefficient band trailer
	AlignBits(stream);

	return true;
}
// Decode a band of raw 16-bit signed coefficients (BAND_ENCODING_16BIT).
//
// The coefficients are stored uncompressed in the bitstream in big-endian
// byte order.  When the quantization factor is one, the bytes are swapped
// straight into the wavelet band (a mild speedup, about 2.5% on half-res
// decodes); otherwise each coefficient is read with GetWord16s and
// multiplied by the dequantization factor.
//
// The caller must have word-aligned the bitstream.  Returns true (this
// routine has no failure path).
bool DecodeBand16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
int band_index, int width, int height)
{
PIXEL *rowptr = wavelet->band[band_index];
int pitch = wavelet->pitch;
int row,dequant = wavelet->quantization[band_index];
// Convert the pitch from bytes to pixels
pitch /= sizeof(PIXEL);
//BAND_ENCODING_16BIT
if(dequant == 1)
{
// Fast path: no dequantization required, so swap the big-endian bytes
// directly into the band instead of calling GetWord16s per coefficient
for (row = 0; row < height; row++)
{
int column;
char *sptr = (char *)stream->lpCurrentWord;
char *dptr = (char *)rowptr;
for (column = 0; column < width; column++)
{
// Swap the two bytes of each 16-bit coefficient
*(dptr+1) = *sptr++;
*dptr = *sptr++;
dptr+=2;
}
// Advance the bitstream past the coefficients consumed in this row.
// Fixed: nWordsUsed tracks the bytes remaining in the stream and must
// be decremented on consumption (it was incorrectly incremented here,
// which is inconsistent with every other consumption site in this file
// and inflates the remaining-byte count used for overrun detection).
stream->lpCurrentWord += width*2;
stream->nWordsUsed -= width*2;
rowptr += pitch;
}
}
else
{
// General path: read each coefficient and apply inverse quantization
for (row = 0; row < height; row++)
{
int column;
for (column = 0; column < width; column++)
{
int value = GetWord16s(stream);
rowptr[column] = value*dequant;
}
rowptr += pitch;
}
}
return true;
}
// Decode a band that was encoded losslessly (BAND_ENCODING_LOSSLESS).
//
// Uses the finite state machine selected by the active codebook to decode
// the 16-bit coefficients in two passes (the coefficients are split into
// high and low bytes in the bitstream), then applies the dequantization
// factor in place.  Resets the active codebook and difference coding flags
// for the next subband.
//
// Returns true on success; on any failure decoder->error is set to
// CODEC_ERROR_RUN_DECODE and false is returned.
bool DecodeBand16sLossless(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
int band_index, int width, int height)
{
//CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
int result = true;
int quant = wavelet->quantization[band_index];
// Get the pointer to the finite state machine
FSM *fsm = &decoder->fsm[decoder->codec.active_codebook];
int size;
PIXEL *rowptr;
//int row = 0;
int pitch;
//CODEC_STATE *codec = &decoder->codec;
//int channel = codec->channel;
//int subband = codec->band.subband;
//int num_subbands = codec->num_subbands;
//int pixel_type = wavelet->pixel_type[band_index];
//int difference_coding = decoder->codec.difference_coding;
//int localquant = 1;
//int threading = 0;
// These flags apply to the current subband only; reset them now that the
// FSM for this band has been selected
decoder->codec.active_codebook = 0; // reset CODEC state
decoder->codec.difference_coding = 0; //reset state for next subband
// Must have a valid wavelet
assert(wavelet != NULL);
if (! (wavelet != NULL)) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
//Must have a valid FSM
assert(fsm != NULL);
if (! (fsm != NULL)) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
// All rows are treated as one int32_t row that covers the entire band
size = fsm->table.num_states;
assert(size > 0);
if (size == 0) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
// Check if the band is intended for 8-bit pixels
assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_16S);
rowptr = (PIXEL *)wavelet->band[band_index];
pitch = wavelet->pitch;
assert(rowptr != NULL && pitch != 0);
if (! (rowptr != NULL && pitch != 0)) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
DeQuantFSM(fsm, 1); // can't use the FSM to dequantize because the coefficients are split into high and low bytes
if (!DecodeBandFSM16sNoGap2Pass(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, quant)) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
// Apply the dequantization in place (the FSM decoded unscaled values)
if(quant)
{
int x,y;
PIXEL *line = rowptr;
// Special case the common factor of 32 with a shift
if(quant == 32)
{
for(y=0;y<height;y++)
{
for(x=0;x<width;x++)
{
line[x] <<= 5;
}
// pitch is in bytes; divide by two to step in 16-bit pixels
line += pitch/2;
}
}
else
{
for(y=0;y<height;y++)
{
for(x=0;x<width;x++)
{
line[x] *= quant;
}
line += pitch/2;
}
}
}
/* if(once <= 60)
{
char name[200];
FILE *fp;
sprintf(name,"C:/Cedoc/DUMP/Decoder/dump%02d.raw", once);
fp = fopen(name,"wb");
fwrite(rowptr,width*height,1,fp);
fclose(fp);
once++;
}*/
assert(result == true);
if (! (result == true)) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
return true;
}
// Invert the wavelet to reconstruct the lower wavelet in the transform
//
// Given the wavelet at position <index> in the transform for one channel,
// this routine allocates (or reallocates) the next lower wavelet and runs
// the appropriate inverse transform to fill its lowpass (or highpass) band.
// Which inverse transform is used depends on the transform type and on the
// wavelet's position in the pyramid:
//   - spatial transform type with index > 0: inverse spatial into the
//     lowpass band of the wavelet below
//   - fieldplus-style transforms with index > 3: inverse spatial into the
//     lowpass band of the wavelet below the temporal lowpass
//   - index == 3: inverse spatial into the highpass band of the temporal
//     wavelet
//   - index == 2 (the temporal wavelet itself): inverse temporal transform
//     into the two frame wavelets
// When <allocations_only> is nonzero only the wavelet allocations are
// performed, so memory can be prepared before the bands are decoded.
// The <channel> argument is unused in this body — presumably retained for
// signature parity with related routines; confirm before removing.
void ReconstructWaveletBand(DECODER *decoder, TRANSFORM *transform, int channel,
IMAGE *wavelet, int index, int precision,
const SCRATCH *scratch, int allocations_only)
{
	int transform_type = transform->type;
	int width = wavelet->width;
	int height = wavelet->height;
	int level = wavelet->level;
	// Scratch space handed to the inverse transforms that take a raw buffer
	PIXEL *buffer = (PIXEL *)scratch->free_ptr;
	size_t buffer_size = scratch->free_size;

	// Is the current wavelet a spatial wavelet?
	if (transform_type == TRANSFORM_TYPE_SPATIAL && index > 0)
	{
		// Reconstruct the lowpass band in the lower wavelet
		int lowpass_index = index - 1;
		IMAGE *lowpass = transform->wavelet[lowpass_index];
		// The lower wavelet is twice the size in each dimension
		int lowpass_width = 2 * width;
		int lowpass_height = 2 * height;
		int lowpass_level = level - 1;
		int lowpass_type = (lowpass_index == 0) ? WAVELET_TYPE_FRAME : WAVELET_TYPE_SPATIAL;
		//const int prescale = 1;
		// Prescaling is only undone for 10-bit (or deeper) precision
		const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
		int prescale = transform->prescale[index];

#if _THREADED_DECODER
		// Allocate (or reallocate) the wavelet with thread safety
		lowpass = GetWaveletThreadSafe(decoder, transform, lowpass_index,
									   lowpass_width, lowpass_height,
									   lowpass_level, lowpass_type);
#else
		// Allocate the wavelet if not already allocated
#if _ALLOCATOR
		lowpass = ReallocWaveletEx(decoder->allocator, lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#else
		lowpass = ReallocWaveletEx(lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#endif
		transform->wavelet[lowpass_index] = lowpass;
#endif

		// Check that the lowpass band has not already been reconstructed
		//assert((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0);

		if(!allocations_only)
		{
			// Check that all of the wavelet bands have been decoded
			assert(BANDS_ALL_VALID(wavelet));

			// Has this wavelet already been reconstructed?
			if ((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0)
			{
				// Perform the inverse spatial transform before decoding the next wavelet
				STOP(tk_decoding);
				START(tk_inverse);
				//TransformInverseSpatialQuantLowpass(wavelet, lowpass, buffer, buffer_size, prescale, inverse_prescale);
				TransformInverseSpatialQuantLowpass(wavelet, lowpass, scratch, prescale, inverse_prescale);
				STOP(tk_inverse);
				START(tk_decoding);

				// Call thread safe routine to update the band valid flags
				UpdateWaveletBandValidFlags(decoder, lowpass, 0);

#if TIMING
				// Increment the count of spatial transforms performed during decoding
				spatial_decoding_count++;
#endif
			}
		}
	}
	// Is the current wavelet a spatial wavelet above the temporal lowpass band?
	else if (index > 3)
	{
		// Reconstruct the lowpass band in the lower wavelet
		const int temporal_wavelet_index = 2;
		// Index 4 sits directly above the temporal wavelet at index 2,
		// so it skips over index 3 (the spatial above the temporal highpass)
		int lowpass_index = (index > 4) ? index - 1 : index - 2;
		IMAGE *lowpass = transform->wavelet[lowpass_index];
		int lowpass_width = 2 * width;
		int lowpass_height = 2 * height;
		int lowpass_level = level - 1;
		int lowpass_type = ((lowpass_index == temporal_wavelet_index) ? WAVELET_TYPE_TEMPORAL : WAVELET_TYPE_SPATIAL);
		//const int prescale = 2;
		const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
		int prescale = transform->prescale[index];

#if _THREADED_DECODER
		// Allocate (or reallocate) the wavelet with thread safety
		lowpass = GetWaveletThreadSafe(decoder, transform, lowpass_index,
									   lowpass_width, lowpass_height,
									   lowpass_level, lowpass_type);
#else
		// Allocate the wavelet if not already allocated
#if _ALLOCATOR
		lowpass = ReallocWaveletEx(decoder->allocator, lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#else
		lowpass = ReallocWaveletEx(lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#endif
		transform->wavelet[lowpass_index] = lowpass;
#endif

		if(!allocations_only)
		{
			// Check that the lowpass band has not already been reconstructed
			assert((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0);

			// Check that all of the wavelet bands have been decoded
			assert(BANDS_ALL_VALID(wavelet));

			// Perform the inverse spatial transform before decoding the next wavelet
			STOP(tk_decoding);
			START(tk_inverse);
			//TransformInverseSpatialQuantLowpass(wavelet, lowpass, buffer, buffer_size, prescale, inverse_prescale);
			TransformInverseSpatialQuantLowpass(wavelet, lowpass, scratch, prescale, inverse_prescale);
			STOP(tk_inverse);
			START(tk_decoding);

			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, lowpass, 0);

#if TIMING
			// Increment the count of spatial transforms performed during decoding
			spatial_decoding_count++;
#endif
		}
	}
	// Is the current wavelet the spatial wavelet above the temporal highpass band?
	else if (index == 3)
	{
		// Reconstruct the highpass band in the temporal wavelet
		const int temporal_wavelet_index = 2;
		int highpass_index = index - 1;
		IMAGE *highpass = transform->wavelet[highpass_index];
		int highpass_width = 2 * width;
		int highpass_height = 2 * height;
		int highpass_level = level - 1;
		int highpass_type = ((highpass_index == temporal_wavelet_index) ? WAVELET_TYPE_TEMPORAL : WAVELET_TYPE_SPATIAL);
		const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
		// Unlike the other branches, prescale is forced to zero here
		// when prescaling does not apply at this precision
		int prescale = inverse_prescale ? transform->prescale[index] : 0;

#if _THREADED_DECODER
		// Allocate (or reallocate) the wavelet with thread safety
		highpass = GetWaveletThreadSafe(decoder, transform, highpass_index,
										highpass_width, highpass_height,
										highpass_level, highpass_type);
#else
		// Allocate the wavelet if not already allocated
#if _ALLOCATOR
		highpass = ReallocWaveletEx(decoder->allocator, highpass , highpass_width, highpass_height, highpass_level, highpass_type);
#else
		highpass = ReallocWaveletEx(highpass , highpass_width, highpass_height, highpass_level, highpass_type);
#endif
		transform->wavelet[highpass_index] = highpass;
#endif

		if(!allocations_only)
		{
			// Check that the highpass band has not already been reconstructed
			assert((highpass->band_valid_flags & BAND_VALID_MASK(1)) == 0);

			// Check that all of the wavelet bands have been decoded
			assert(BANDS_ALL_VALID(wavelet));

			// Perform the inverse spatial transform before decoding the next wavelet
			STOP(tk_decoding);
			START(tk_inverse);
			TransformInverseSpatialQuantHighpass(wavelet, highpass, buffer, buffer_size, prescale);
			STOP(tk_inverse);
			START(tk_decoding);

			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, highpass, 1);

#if TIMING
			// Increment the count of spatial transforms performed during decoding
			spatial_decoding_count++;
#endif
		}
	}
	// Is the current wavelet the temporal wavelet?
	else if (index == 2)
	{
		// Get the temporal wavelet
		IMAGE *temporal = wavelet;

		// Set the frame wavelet parameters
		int frame_level = 1;
		int frame_type = WAVELET_TYPE_FRAME;

		// Get the two frame wavelets
		IMAGE *frame[2];
		frame[0] = transform->wavelet[0];
		frame[1] = transform->wavelet[1];

		// Check that the temporal wavelet is valid
		assert(temporal->num_bands == 2 && temporal->wavelet_type == WAVELET_TYPE_TEMPORAL);

#if _THREADED_DECODER
		// Allocate (or reallocate) the frame wavelets with thread safety
		frame[0] = GetWaveletThreadSafe(decoder, transform, 0, width, height, frame_level, frame_type);
		frame[1] = GetWaveletThreadSafe(decoder, transform, 1, width, height, frame_level, frame_type);
#else
		// Allocate the frame wavelets if not already allocated
#if _ALLOCATOR
		frame[0] = ReallocWaveletEx(decoder->allocator, frame[0], width, height, frame_level, frame_type);
		frame[1] = ReallocWaveletEx(decoder->allocator, frame[1], width, height, frame_level, frame_type);
#else
		frame[0] = ReallocWaveletEx(frame[0], width, height, frame_level, frame_type);
		frame[1] = ReallocWaveletEx(frame[1], width, height, frame_level, frame_type);
#endif
		transform->wavelet[0] = frame[0];
		transform->wavelet[1] = frame[1];
#endif

#if (0 && DEBUG)
		// NOTE(review): this disabled block references `logfile`, which is
		// not declared in this function — it would not compile if enabled
		if (logfile) {
			fprintf(logfile, "Before inverse temporal transform");
			DumpArray16s("Temporal Lowpass", temporal->band[0], temporal->width, temporal->height, temporal->pitch, logfile);
			DumpArray16s("Temporal Highpass", temporal->band[1], temporal->width, temporal->height, temporal->pitch, logfile);
		}
#endif

		if(!allocations_only)
		{
			// Check that the lowpass bands have not already been reconstructed
			assert((frame[0]->band_valid_flags & BAND_VALID_MASK(0)) == 0);
			assert((frame[1]->band_valid_flags & BAND_VALID_MASK(0)) == 0);

			// Check that all of the wavelet bands have been decoded
			assert(BANDS_ALL_VALID(temporal));

			// Invert the temporal transform between the frame wavelets
			STOP(tk_decoding);
			START(tk_inverse);
			TransformInverseTemporalQuant(temporal, frame[0], frame[1], buffer, buffer_size, precision);
			STOP(tk_inverse);
			START(tk_decoding);

#if (0 && DEBUG)
			// NOTE(review): this disabled block references `quad[0]` and
			// `logfile`, which are not declared here — stale debug code
			if (logfile) {
				IMAGE *wavelet = quad[0];
				fprintf(logfile, "After inverse temporal transform\n");
				DumpArray16s("Temporal Lowpass", temporal->band[0], temporal->width, temporal->height, temporal->pitch, logfile);
				DumpArray16s("Temporal Highpass", temporal->band[1], temporal->width, temporal->height, temporal->pitch, logfile);
				DumpArray16s("First frame wavelet, band 0", wavelet->band[0], wavelet->width, wavelet->height, wavelet->pitch, logfile);
			}
#endif

			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, frame[0], 0);
			UpdateWaveletBandValidFlags(decoder, frame[1], 0);

#if TIMING
			// Increment the number of temporal transforms performed outside of decoding
			temporal_decoding_count++;
#endif
		}
	}
}
// Compute the dimensions of the output buffer
//
// Determines the width and height of the decoded frame from the decoding
// resolution recorded in the decoder's frame info and the dimensions of the
// wavelet that will be used for reconstruction.  The results are stored in
// *decoded_width_out and *decoded_height_out (both zeroed first so early
// exits leave well-defined values).
void ComputeOutputDimensions(DECODER *decoder, int frame,
							 int *decoded_width_out, int *decoded_height_out)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	FRAME_INFO *info = &decoder->frame;
	TRANSFORM **transform_array = decoder->transform;
	IMAGE *wavelet = NULL;
	int resolution = info->resolution;
	int scale = 0;
	int output_width;
	int output_height;

	// Both return locations must be provided
	if (decoded_width_out == NULL || decoded_height_out == NULL) {
		return;
	}

	// Clear the return values in case this routine terminates early
	*decoded_width_out = 0;
	*decoded_height_out = 0;

	// Select the wavelet whose size determines the output dimensions and
	// the factor that relates the wavelet size to the decoded size
	switch (resolution)
	{
	case DECODED_RESOLUTION_FULL:
	case DECODED_RESOLUTION_HALF_HORIZONTAL:
#if DEBUG
		assert(AllTransformBandsValid(transform_array, num_channels, frame));
#endif
		scale = 2;
		wavelet = transform_array[0]->wavelet[0];
		break;

	case DECODED_RESOLUTION_HALF:
#if DEBUG
		assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
		scale = 1;
		wavelet = transform_array[0]->wavelet[0];
		break;

	case DECODED_RESOLUTION_QUARTER:
		if (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
		{
#if DEBUG
			assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
			scale = 1;
			wavelet = transform_array[0]->wavelet[0];
		}
		else
		{
			scale = 1;
			wavelet = transform_array[0]->wavelet[3];
		}
		break;

	case DECODED_RESOLUTION_LOWPASS_ONLY:
		scale = 1;
		wavelet = transform_array[0]->wavelet[5];
		if (wavelet == NULL) {
			// The sample was intra-frame compressed, so the smallest
			// wavelet is at index two instead
			wavelet = transform_array[0]->wavelet[2];
		}
		break;

	default:
		assert(0);
		break;
	}

	// Derive the decoded frame dimensions from the selected wavelet
	assert(wavelet != NULL);

	if (resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) {
		// Only the height is scaled up at this resolution
		output_width = wavelet->width;
	}
	else {
		output_width = scale * wavelet->width;
	}
	output_height = scale * wavelet->height;

	// Return the decoded width and height
	*decoded_width_out = output_width;
	*decoded_height_out = output_height;
}
#define DEBUG_ROW16U 0
void ReconstructSampleFrameToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
FRAME_INFO local_info;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
FRAME_INFO *info = &local_info;
CODEC_STATE *codec = &decoder->codec;
int num_channels = codec->num_channels;
int progressive = codec->progressive;
TRANSFORM **transform_array = decoder->transform;
IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
IMAGE *wavelet;
int wavelet_width;
int wavelet_height;
int decoded_width;
int decoded_height;
int resolution = decoder->frame.resolution;
int chroma_offset = decoder->codec.chroma_offset;
int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed;
//TODO: Change this routine to return the codec error code
CODEC_ERROR error = CODEC_ERROR_OKAY;
//if(decoder->cfhddata.calibration)
// LoadTweak();
//TODO: Change this routine to return an error code
if (decoder == NULL) {
return;
}
decoder->gop_frame_num = frame;
#if _THREADED_DECODER
// Wait until the transform thread has finished all pending transforms
WaitForTransformThread(decoder);
#endif
//return;
// copy frame info in a changable local structure
memcpy(info, &decoder->frame, sizeof(FRAME_INFO));
// Use the old code for reconstructing the frame
#if (0 && DEBUG)
// Force quarter resolution decoding for debugging that feature
resolution = DECODED_RESOLUTION_QUARTER;
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Inverting last wavelet, frame: %d\n", frame);
}
#endif
// The decoder can decode a video sample without returning a frame
if (output == NULL || pitch == 0) return;
#if (1 && DEBUG_ROW16U)
// Force decoding to 16-bit pixels for debugging
info->format = DECODED_FORMAT_YR16;
#endif
#if 0
if (info->format == DECODED_FORMAT_YR16)
{
// Force interlaced or progressive decoding for debugging
//progressive = false;
progressive = true;
}
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decoder flags: 0x%p\n", decoder->flags);
}
#endif
// Does this frame have to be reconstructed?
if ((decoder->flags & DECODER_FLAGS_RENDER) == 0) {
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decoder discarding frame: %d\n", frame);
}
#endif
return;
}
// Check that the requested frame is within the limits of the group of frames
assert(0 <= frame && frame < decoder->gop_length);
// Check that the frame resolution is valid
assert(IsValidFrameResolution(resolution));
if (!IsValidFrameResolution(resolution)) {
decoder->error = CODEC_ERROR_RESOLUTION;
return;
}
#if (0 && TIMING) //(0 && DEBUG)
// Override progressive flag read from the bitstream for debugging
//progressive = 0; // Use the inverse frame transform
progressive = 1; // Use the inverse spatial transform
#endif
// Build the 3D LUTs if needed
ComputeCube(decoder);
//HACK DAN20110131 -- some formats will not directly decode so need to use the AM route
{
if( decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422 &&
resolution == DECODED_RESOLUTION_HALF)
{
if( decoder->frame.format == COLOR_FORMAT_R408 ||
decoder->frame.format == COLOR_FORMAT_V408)
{
decoder->use_active_metadata_decoder = true;
decoder->apply_color_active_metadata = true;
}
}
if( decoder->frame.format == COLOR_FORMAT_NV12)
{
decoder->use_active_metadata_decoder = true;
decoder->apply_color_active_metadata = true; // TODO, make it work with this.
}
if (decoder->codec.progressive == false && decoder->frame.format == COLOR_FORMAT_RGB24)
{
decoder->use_active_metadata_decoder = true;
decoder->apply_color_active_metadata = true;
}
}
// Get the decoding scale
if(!uncompressed)
{
switch(resolution)
{
case DECODED_RESOLUTION_FULL:
case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
#if DEBUG
assert(AllTransformBandsValid(transform_array, num_channels, frame));
#endif
wavelet = transform_array[0]->wavelet[0];
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = 2 * wavelet_width;
decoded_height = 2 * wavelet_height;
break;
case DECODED_RESOLUTION_HALF:
#if DEBUG
assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
wavelet = transform_array[0]->wavelet[0];
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = wavelet_width;
decoded_height = wavelet_height;
break;
case DECODED_RESOLUTION_HALF_HORIZONTAL:
#if DEBUG
assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
wavelet = transform_array[0]->wavelet[0];
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = wavelet_width;
decoded_height = 2 * wavelet_height;
break;
case DECODED_RESOLUTION_QUARTER:
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
#if DEBUG
assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
wavelet = transform_array[0]->wavelet[0];
}
else
{
wavelet = transform_array[0]->wavelet[3];
}
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = wavelet_width;
decoded_height = wavelet_height;
break;
case DECODED_RESOLUTION_LOWPASS_ONLY:
wavelet = transform_array[0]->wavelet[5];
if(wavelet == NULL) // there Intra Frame compressed
wavelet = transform_array[0]->wavelet[2];
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = wavelet_width;
decoded_height = wavelet_height;
break;
default:
assert(0);
break;
}
}
else
{
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
decoded_width = info->width/2;
decoded_height = info->height/2;
}
else
{
decoded_width = info->width;
decoded_height = info->height;
}
}
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
if(resolution == DECODED_RESOLUTION_FULL)
{
if(decoded_width*2 == info->width)
{
info->width /= 2;
info->height /= 2;
info->resolution = resolution = DECODED_RESOLUTION_FULL_DEBAYER;
}
}
else if(resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
{
if(decoded_width*2 == info->width)
{
info->width /= 2;
info->height /= 2;
}
}
else if(resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
if(decoded_width*2 == info->width)
{
info->height /= 2;
info->resolution = resolution = DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER;
}
}
else if(decoder->frame.format == DECODED_FORMAT_BYR2 || decoder->frame.format == DECODED_FORMAT_BYR4)
{
if(decoded_width*2 == info->width)
{
info->width /= 2;
info->height /= 2;
info->resolution = resolution = DECODED_RESOLUTION_HALF_NODEBAYER;
}
}
else
{
if(resolution == DECODED_RESOLUTION_HALF)
{
if(decoded_width*2 == info->width)
{
decoded_width *= 2;
decoded_height *= 2;
info->resolution = resolution = DECODED_RESOLUTION_FULL;
}
}
else if(resolution == DECODED_RESOLUTION_QUARTER)
{
if(uncompressed)
{
decoded_width *= 2;
decoded_height *= 2;
info->resolution = resolution = DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED;
}
else
{
if(decoded_width == info->width)
{
info->resolution = resolution = DECODED_RESOLUTION_HALF;
}
}
}
}
}
if(uncompressed)
{
// Call the appropriate routine for the encoded format
switch (decoder->codec.encoded_format)
{
case ENCODED_FORMAT_YUVA_4444: // Four planes of YUVA 4:4:4:4
// Not implemented
assert(0);
error = CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
case ENCODED_FORMAT_BAYER: // Bayer encoded data
// Add new code here for the final steps in decoding the Bayer format
error = UncompressedSampleFrameBayerToBuffer(decoder, info, frame, output, pitch);
break;
case ENCODED_FORMAT_YUV_422: // Original encoding scheme for YUV 4:2:2 (always v210)
error = UncompressedSampleFrameYUVToBuffer(decoder, info, frame, output, pitch);//CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
case ENCODED_FORMAT_RGB_444: // Original encoding scheme for RGB 444 (always DPX0)
error = UncompressedSampleFrameRGBToBuffer(decoder, info, frame, output, pitch);//CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
default:
// Fall through into the old code for reconstructing frames
error = CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
}
}
else
{
// Call the appropriate routine for the encoded format
switch (decoder->codec.encoded_format)
{
case ENCODED_FORMAT_RGB_444: // channels = decoder->codec.num_channels; planes of RGB 4:4:4
case ENCODED_FORMAT_RGBA_4444: // Four planes of ARGB 4:4:4:4
error = ReconstructSampleFrameRGB444ToBuffer(decoder, frame, output, pitch);
break;
case ENCODED_FORMAT_YUVA_4444: // Four planes of YUVA 4:4:4:4
// Not implemented
assert(0);
//error = ReconstructSampleFrameYUVA4444ToBuffer(decoder, frame, output, pitch);
break;
case ENCODED_FORMAT_BAYER: // Bayer encoded data
// Add new code here for the final steps in decoding the Bayer format
error = ReconstructSampleFrameBayerToBuffer(decoder, info, frame, output, pitch);
break;
case ENCODED_FORMAT_YUV_422: // Original encoding scheme for YUV 4:2:2
// Add new code here for the final steps in decoding the original YUV 4:2:2 format
error = ReconstructSampleFrameYUV422ToBuffer(decoder, frame, output, pitch);
break;
default:
// Fall through into the old code for reconstructing frames
error = CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
}
}
// Was the newer code able to successfully reconstruct the frame?
if (error != CODEC_ERROR_UNSUPPORTED_FORMAT)
{
// Save the codec error code in the decoder state and return
decoder->error = error;
return;
}
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decoded scale: %d, decoded width: %d, wavelet width: %d\n", decoded_scale, decoded_width, wavelet_width);
}
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decoded width: %d, height: %d, frame width: %d, height: %d, output pitch: %d\n",
decoded_width, decoded_height, info->width, info->height, pitch);
}
#endif
#if (0 && DEBUG)
if (logfile) {
IMAGE *wavelet = transform[0]->wavelet[frame];
int band = 0;
fprintf(logfile, "Luminance wavelet, frame: %d, band: %d\n", frame, band);
DumpArray16s("Lowpass Band", wavelet->band[band], wavelet->width, wavelet->height, wavelet->pitch, logfile);
}
#endif
// Check that the requested frame is large enough to hold the decoded frame
#if (0 && DEBUG)
//if (! (info->width >= decoded_width))
{
if (logfile) {
//fprintf(logfile, "Requested frame not large enough to hold decoded frame: %d < %d\n", info->width, decoded_width);
fprintf(logfile, "Output frame width: %d, decoded frame width: %d\n", info->width, decoded_width);
}
}
#endif
assert(info->width >= decoded_width);
assert((info->height+7)/8 >= (decoded_height+7)/8);
if (!(info->width >= decoded_width && (info->height+7)/8 >= (decoded_height+7)/8)) {
decoder->error = CODEC_ERROR_FRAMESIZE;
return;
}
#if (0 && DEBUG)
if (logfile) {
//SUBIMAGE subimage = SUBIMAGE_UPPER_LEFT(16, 16);
SUBIMAGE subimage = SUBIMAGE_UPPER_RIGHT(16, 16);
// Adjust the subimage to be at the middle of the right border
//subimage.row += wavelet_height/2 - 8;
DumpBand("SIF Image", wavelet, 0, &subimage, logfile);
}
#endif
START(tk_inverse);
if (resolution == DECODED_RESOLUTION_QUARTER)
{
int precision = codec->precision;
// Reconstruct the frame to quarter resolution
ReconstructQuarterFrame(decoder, num_channels, frame, output, pitch,
info, &decoder->scratch, precision);
}
else
// Was the first transform a frame transform (used for interlaced frames)?
if (!progressive)
{
// Can the inverse frame transform and output byte packing be done in one pass?
if ((resolution == DECODED_RESOLUTION_FULL) &&
(info->format == DECODED_FORMAT_YUYV || info->format == DECODED_FORMAT_UYVY))
{
// Apply the inverse frame transform and pack the results into the output buffer
int precision = codec->precision;
#if (0 && DEBUG)
DumpWaveletBandsPGM(wavelet, frame, num_channels);
#endif
#if _INTERLACED_WORKER_THREADS
StartInterlaceWorkerThreads(decoder);
//TODO: support new threading
// Send the upper and lower rows of the transforms to the worker threads
TransformInverseFrameThreadedToYUV(decoder, frame, num_channels, output, pitch,
info, chroma_offset, precision);
#else
// Transform the wavelets for each channel to the output image (not threaded)
TransformInverseFrameToYUV(transform_array, frame, num_channels, output, pitch,
info, &decoder->scratch, chroma_offset, precision);
#endif
}
//#if BUILD_PROSPECT
else if (resolution == DECODED_RESOLUTION_FULL && info->format == DECODED_FORMAT_YR16)
{
// Apply the inverse frame transform and output rows of luma and chroma
//DWORD dwThreadID1;
//DWORD dwThreadID2;
//HANDLE thread1;
//HANDLE thread2;
int precision = codec->precision;
#if _INTERLACED_WORKER_THREADS
StartInterlaceWorkerThreads(decoder);
//TODO: support new threading
// Send the upper and lower rows of the transforms to the worker threads
TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels,
(PIXEL16U *)output, pitch,
info, chroma_offset, precision);
#else
// Transform the wavelets for each channel to the output image (not threaded)
TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels,
(PIXEL16U *)output, pitch, info,
&decoder->scratch, chroma_offset, precision);
#endif
}
//#endif
else
{
// Reconstruct the frame as separate planes and combine the planes into a packed output image
int channel;
if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
{
int scale = 13;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[5];
if(lowpass_images[channel] == NULL) // therefore IntreFrame compressed.
{
scale = 12;
lowpass_images[channel] = transform_array[channel]->wavelet[2];
}
}
STOP(tk_inverse);
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
scale, decoder->codec.encoded_format, decoder->frame.white_point);
START(tk_inverse);
}
else
// In SIF resolution, no need to reconstruct the bottom-level wavelet transforms
// Just copy the lowpass images directly into output frame
if (resolution == DECODED_RESOLUTION_HALF)
{
int precision = codec->precision;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[frame];
}
STOP(tk_inverse);
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
precision, decoder->codec.encoded_format, decoder->frame.white_point);
START(tk_inverse);
}
// In full resolution, reconstruct the frame wavelet and
// convert the YUYV output to the specified color format
else
{
int precision = codec->precision;
TransformInverseFrameToBuffer(transform_array, frame, num_channels, output, pitch,
info, &decoder->scratch, chroma_offset, precision);
}
}
}
else // The first transform was a spatial transform (used for progressive frames)
{
// Can the inverse frame transform and output byte packing be done in one pass?
if ((resolution == DECODED_RESOLUTION_FULL) &&
(info->format == DECODED_FORMAT_YUYV || info->format == DECODED_FORMAT_UYVY) && // Output YUV
decoder->thread_cntrl.capabilities & _CPU_FEATURE_SSE2)
{
int precision = codec->precision;
//DWORD dwThreadID1;
//DWORD dwThreadID2;
//HANDLE thread1;
//HANDLE thread2;
// Apply the inverse frame transform and pack the results into the output buffer
#if _THREADED
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
uint8_t *pixoutput = output;
if(decoder->use_active_metadata_decoder) //WIP
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
pixoutput, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sBayerThruLUT);
}
else
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
pixoutput, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sToBayerYUV);
}
}
else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sRGB2YUV);
}
else
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sToYUV);
}
#else
//TODO : Accelerated BAYER for single thread decoding.
assert(0);
// Transform the wavelets for each channel to the output image (not threaded)
//TransformInverseSpatialToYUV(decoder, transform_array, frame, num_channels, output, pitch, info,
// &decoder->scratch, chroma_offset, precision);
#endif
}
else if ((resolution == DECODED_RESOLUTION_FULL) && decoder->codec.encoded_format == ENCODED_FORMAT_BAYER &&
(info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32) && // Output RGB
decoder->thread_cntrl.capabilities & _CPU_FEATURE_SSE2 && decoder->use_active_metadata_decoder)
{
int precision = codec->precision;
//DWORD dwThreadID1;
//DWORD dwThreadID2;
//HANDLE thread1;
//HANDLE thread2;
// Apply the inverse frame transform and pack the results into the output buffer
#if _THREADED
{
uint8_t *pixoutput = output;
if(info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32)
{
pixoutput += (info->height-1)*pitch;
pitch = -pitch;
}
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
pixoutput, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sBayerThruLUT);
}
#endif
}
//#if BUILD_PROSPECT
else if (resolution == DECODED_RESOLUTION_FULL && info->format == DECODED_FORMAT_YR16)
{
// Apply the inverse frame transform and output rows of luma and chroma
int precision = codec->precision;
#if _THREADED
TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
(uint8_t *)output, pitch,
info, chroma_offset, precision);
#else
// Transform the wavelets for each channel to the output image (not threaded)
TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
(PIXEL16U *)output, pitch, info,
&decoder->scratch, chroma_offset, precision);
#endif
}
//#endif
else
{
// Reconstruct the frame as separate planes and combine the planes into a packed output image
int channel;
if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
{
//int precision = codec->precision;
int scale = 13;
//DAN20081203 -- fix for 444 decodes in AE32-bit float
decoder->frame.white_point = 16;
//decoder->frame.signed_pixels = 0;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[5];
if(lowpass_images[channel] == NULL) // therefore IntreFrame compressed.
{
scale = 12;
lowpass_images[channel] = transform_array[channel]->wavelet[2];
}
}
STOP(tk_inverse);
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
scale, decoder->codec.encoded_format, decoder->frame.white_point);
START(tk_inverse);
}
else
// In SIF resolution, no need to reconstruct the bottom-level wavelet transforms
// Just copy the lowpass images directly into output frame
if (resolution == DECODED_RESOLUTION_HALF || resolution == DECODED_RESOLUTION_HALF_NODEBAYER)// || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
{
int precision = codec->precision;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[frame];
#if (0 && DEBUG)
if (logfile) {
char label[_MAX_PATH];
char *format = decoded_format_string[info->format];
sprintf(label, "Output, channel: %d, format: %s", channel, format);
DumpImageStatistics(label, lowpass_images[channel], logfile);
}
#endif
}
STOP(tk_inverse);
#if 1 //|| BAYER_SUPPORT
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
#if _THREADED
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
#else
//unsigned short scanline[4096*3],*sptr;
//unsigned short scanline2[4096*3],*sptr2;
unsigned short *scanline,*sptr;
unsigned short *scanline2,*sptr2;
char *buffer = decoder->scratch.free_ptr;
size_t buffer_size = decoder->scratch.free_size;
IMAGE *g_image = lowpass_images[0];
IMAGE *rg_image = lowpass_images[1];
IMAGE *bg_image = lowpass_images[2];
IMAGE *gd_image = lowpass_images[3];
uint8_t *outyuv,*line = output;
PIXEL *bayer_line, *bayerptr;
PIXEL *G,*RG,*BG,*GD;
int x,y;
int bayer_pitch = info->width*4;
int format = info->format;
bool inverted = false;
int maxbound = 4095; //10-bit source
int midpoint = 32768>>3;
int shift = 4;
if(precision == 12)
{
maxbound = 16383;
midpoint = 32768>>1;
shift = 2;
}
if(buffer_size < info->width * 2 * 3 * 2)
assert(0); // not enough memory
if (format == DECODED_FORMAT_RGB24 || format == DECODED_FORMAT_RGB32)
{
inverted = true;
line += (info->height-1)*pitch;
pitch = -pitch;
}
scanline = (unsigned short *)buffer;
buffer += info->width * 2 * 3;
scanline2 = (unsigned short *)buffer;
G = g_image->band[0];
RG = rg_image->band[0];
BG = bg_image->band[0];
for(y=0; y<info->height; y++)
{
uint8_t *newline = line;
PIXEL *newG=G,*newRG=RG,*newBG=BG;
PIXEL *gptr,*rgptr,*bgptr,*gdptr;
int r,g,b,rg,bg,y1,y2,u,v;
int r1,g1,b1;
int i;
newline += pitch*y;
newG += y * (g_image->pitch / sizeof(PIXEL));
newRG += y * (rg_image->pitch / sizeof(PIXEL));
newBG += y * (bg_image->pitch / sizeof(PIXEL));
gptr = newG;
rgptr = newRG;
bgptr = newBG;
sptr = scanline;
for(x=0; x<info->width; x++)
{
g = (*gptr++);
if(g > maxbound) g = maxbound;
rg = (*rgptr++);
bg = (*bgptr++);
r = (rg<<1) - midpoint + g;
b = (bg<<1) - midpoint + g;
if(r > maxbound) r = maxbound;
if(b > maxbound) b = maxbound;
if(r < 0) r = 0;
if(g < 0) g = 0;
if(b < 0) b = 0;
*sptr++ = r<<shift;
*sptr++ = g<<shift;
*sptr++ = b<<shift;
}
{
int flags = 0;
int whitebitdepth = 16;
sptr = scanline;
if(decoder->apply_color_active_metadata)
sptr = ApplyActiveMetaData(decoder, info->width, 1, y, scanline, scanline2,
info->format, &whitebitdepth, &flags);
ConvertLinesToOutput(decoder, info->width, 1, sptr,
newline, y, pitch,
info->format, whitebitdepth, flags);
}
}
#endif
}
else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
IMAGE *g_image = lowpass_images[0];
IMAGE *rg_image = lowpass_images[1];
IMAGE *bg_image = lowpass_images[2];
uint8_t *line = output;
unsigned char *rgb8;
PIXEL *G,*RG,*BG;
int x,y;
G = g_image->band[0];
RG = rg_image->band[0];
BG = bg_image->band[0];
if(info->format == DECODED_FORMAT_RGB32)
{
line = output;
line += (info->height-1) * pitch;
for(y=0; y<info->height; y++)
{
PIXEL *gptr,*rgptr,*bgptr;
int r,g,b;
int i,noisearray[32];
for(i=0; i<32; i++)
{
noisearray[i] = (rand() & 63);
}
gptr = G;
rgptr = RG;
bgptr = BG;
rgb8 = (unsigned char *)line;
for(x=0; x<info->width; x++)
{
int rnd = noisearray[x&31];
g = ((*gptr++) + rnd) >> 6;
r = ((*rgptr++) + rnd) >> 6;
b = ((*bgptr++) + rnd) >> 6;
if(r < 0) r=0; if(r > 255) r=255;
if(g < 0) g=0; if(g > 255) g=255;
if(b < 0) b=0; if(b > 255) b=255;
*rgb8++ = b;
*rgb8++ = g;
*rgb8++ = r;
*rgb8++ = 255;
}
line -= pitch;
G += g_image->pitch / sizeof(PIXEL);
RG += rg_image->pitch / sizeof(PIXEL);
BG += bg_image->pitch / sizeof(PIXEL);
}
}
else if(info->format == DECODED_FORMAT_RGB24)
{
line = output;
line += (info->height-1) * pitch;
for(y=0; y<info->height; y++)
{
PIXEL *gptr,*rgptr,*bgptr;
int r,g,b;
int i,noisearray[32];
for(i=0; i<32; i++)
{
noisearray[i] = (rand() & 63);
}
gptr = G;
rgptr = RG;
bgptr = BG;
rgb8 = (unsigned char *)line;
for(x=0; x<info->width; x++)
{
int rnd = noisearray[x&31];
g = ((*gptr++) + rnd) >> 6;
r = ((*rgptr++) + rnd) >> 6;
b = ((*bgptr++) + rnd) >> 6;
if(r < 0) r=0; if(r > 255) r=255;
if(g < 0) g=0; if(g > 255) g=255;
if(b < 0) b=0; if(b > 255) b=255;
*rgb8++ = b;
*rgb8++ = g;
*rgb8++ = r;
}
line -= pitch;
G += g_image->pitch / sizeof(PIXEL);
RG += rg_image->pitch / sizeof(PIXEL);
BG += bg_image->pitch / sizeof(PIXEL);
}
}
}
else
#endif
{
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
precision, decoder->codec.encoded_format, decoder->frame.white_point);
}
START(tk_inverse);
#if (0 && DEBUG)
if (logfile) {
char label[_MAX_PATH];
int width = info->width;
int height = info->height;
sprintf(label, "Output");
DumpBufferStatistics(label, output, width, height, pitch, logfile);
}
#endif
}
// In full resolution, reconstruct the frame wavelet and
// convert the YUYV output to the specified color format
else
{
// Handle inversion of the output image in this routine
FRAME_INFO info2;
int format;
bool inverted = false;
int precision = codec->precision;
memcpy(&info2, info, sizeof(FRAME_INFO));
format = info2.format;
if (format == DECODED_FORMAT_RGB24) {
format = DECODED_FORMAT_RGB24_INVERTED;
info2.format = format;
inverted = true;
}
else if (format == DECODED_FORMAT_RGB32) {
format = DECODED_FORMAT_RGB32_INVERTED;
info2.format = format;
inverted = true;
}
// Have the output location and pitch been inverted?
if (inverted && pitch > 0) {
int height = info->height;
if(resolution == DECODED_RESOLUTION_FULL_DEBAYER)
height *= 2;
output += (height - 1) * pitch; // Start at the bottom row
pitch = NEG(pitch); // Negate the pitch to go up
}
//#if BUILD_PROSPECT
			// Output the frame in V210 format?
if( (format == DECODED_FORMAT_V210 ||
format == DECODED_FORMAT_YU64) &&
decoder->codec.encoded_format != ENCODED_FORMAT_BAYER )
{
//char *buffer = decoder->buffer;
//size_t buffer_size = decoder->buffer_size;
int precision = codec->precision;
// The output buffer is an array of 10-bit pixels packed into double words
#if 0
TransformInverseSpatialToV210(transform_array, frame, num_channels, output, pitch, &info2,
buffer, buffer_size, chroma_offset, decoder->codec.precision);
#else
TransformInverseSpatialToV210(transform_array, frame, num_channels, output, pitch,
&info2, &decoder->scratch, chroma_offset, precision);
#endif
}
else
//#endif
// Decoding a full resolution progressive frame to a Bayer output format?
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
//char *buffer = decoder->buffer;
//size_t buffer_size = decoder->buffer_size;
int precision = codec->precision;
// PIXEL16U *RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*sizeof(PIXEL), 16);
if(decoder->RawBayer16 == NULL)
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
size_t size = info->width*decoded_height*4*sizeof(PIXEL);
decoder->RawBayer16 =
(PIXEL16U *)AllocAligned(allocator, size, 16);
#else
decoder->RawBayer16 =
(PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*sizeof(PIXEL), 16);
#endif
decoder->RawBayerSize = info->width*decoded_height*4*sizeof(PIXEL);
}
//TODO: Replace this memory allocation with a scratch buffer allocation
//#ifdef SHARPENING
if(decoder->RGBFilterBuffer16 == NULL)
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
size_t size = info->width*decoded_height*4*3*sizeof(PIXEL);
decoder->RGBFilterBuffer16 =
(PIXEL16U *)AllocAligned(allocator, size, 16);
#else
decoder->RGBFilterBuffer16 =
(PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*3*sizeof(PIXEL), 16);
#endif
decoder->RGBFilterBufferSize = info->width*decoded_height*4*3*sizeof(PIXEL);
}
//#endif
if(decoder->RawBayer16 == NULL || decoder->RGBFilterBuffer16 == NULL)
{
decoder->error = CODEC_ERROR_MEMORY_ALLOC;
return;
}
if(decoder->RawBayer16)
{
uint8_t *line;
PIXEL16U *bayer_line, *bayerptr, *outA16, *outB16;
PIXEL16U *G,*RG,*BG,*GD;
int x,y;
int bayer_pitch = info->width*4;
//float scale = 256.0;
//int matrix_non_unity = 0;
//int wb_non_unity = 0;
//float curve2lin[2048];
//float lin2curve[2048+512+2];
#if 0
static float rgb2yuv[3][4] =
{
{0.183f, 0.614f, 0.062f, 16.0f/256.0f},
{-0.101f,-0.338f, 0.439f, 0.5f},
{0.439f,-0.399f,-0.040f, 0.5f}
};
float mtrx[3][4] =
{
{1.0f, 0, 0, 0},
{0, 1.0f, 0, 0},
{0, 0, 1.0f, 0}
};
float whitebalance[3] = { 1.0f, 1.0f, 1.0f };
#endif
#if 0 // Matrix disabled as it can only be correct handled by the 3D LUT due to the required linear conversions
/* if(decoder->cfhddata.MagicNumber == CFHDDATA_MAGIC_NUMBER && decoder->cfhddata.version >= 2)
{
float fval = 0.0;
int i;
for(i=0; i<12; i++)
{
mtrx[i>>2][i&3] = fval = decoder->cfhddata.colormatrix[i>>2][i&3];
if((i>>2) == (i&3))
{
if(fval != 1.0)
{
matrix_non_unity = 1;
}
}
else
{
if(fval != 0.0)
{
matrix_non_unity = 1;
}
}
}
					// not active as VFW doesn't yet support the 3D LUTs
if(decoder->cfhddata.version >= 5)
{
int j;
float encode_curvebase = 90.0;
float decode_curvebase = 90.0;
int encode_curve_type = decoder->cfhddata.encode_curve >> 16;
int decode_curve_type = decoder->cfhddata.decode_curve >> 16;
if(decoder->cfhddata.user_white_balance[0] > 0.0)
{
wb_non_unity = 1;
whitebalance[0] = decoder->cfhddata.user_white_balance[0];
whitebalance[1] = (decoder->cfhddata.user_white_balance[1]+decoder->cfhddata.user_white_balance[2])/2.0;
whitebalance[2] = decoder->cfhddata.user_white_balance[3];
}
if(encode_curve_type) //1 or 2
encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff);
else
{
encode_curve_type = 1;
encode_curvebase = 90.0;
}
if(decode_curve_type) //1 or 2
decode_curvebase = (float)((decoder->cfhddata.decode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.decode_curve & 0xff);
else
{
decode_curve_type = 1;
decode_curvebase = 90.0;
}
for(j=0; j<2048; j++)
{
if(encode_curve_type == 1)
curve2lin[j] = CURVE_LOG2LIN((float)j/2047.0,encode_curvebase);
else
curve2lin[j] = CURVE_GAM2LIN((float)j/2047.0,encode_curvebase);
}
for(j=-512; j<=2048; j++) // -1 to +4
{
if(encode_curve_type == CURVE_TYPE_LOG)
lin2curve[j+512] = CURVE_LIN2LOG((float)j/512.0,encode_curvebase);
else
lin2curve[j+512] = CURVE_LIN2GAM((float)j/512.0,encode_curvebase);
}
}
}*/
#endif
#if _THREADED
TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
(uint8_t *)decoder->RawBayer16, bayer_pitch*sizeof(PIXEL),
info, chroma_offset, precision);
#else
// Decode that last transform to rows of Bayer data (one row per channel)
TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
decoder->RawBayer16, bayer_pitch*sizeof(PIXEL), info,
&decoder->scratch, chroma_offset, precision);
#endif
if(resolution == DECODED_RESOLUTION_FULL_DEBAYER &&
(info->format < DECODED_FORMAT_BYR1 || info->format > DECODED_FORMAT_BYR4))
{
#if _THREADED //DemosaicRAW
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
#else
assert(0) // old code disabled
/* int bayer_format = decoder->cfhddata.bayer_format;
unsigned char *outA8, *outB8;
unsigned short *lineStartA16, *lineStartB16;
unsigned short *lineA16, *lineB16;
// int stats1=0, stats2=0, statsd=0;
// double dstats1=0, dstats2=0, dstatsd=0;
line = output;
bayer_line = decoder->RawBayer16;
for(y=0; y<info->height+DEMOSAIC_DELAYLINES; y++)
{
bayer_line = decoder->RawBayer16;
bayer_line += bayer_pitch * y;
if(y<info->height)
{
ColorDifference2Bayer(info->width,
bayer_line, bayer_pitch, bayer_format);
}
if(y>=3+DEMOSAIC_DELAYLINES && y<info->height-3+DEMOSAIC_DELAYLINES) //middle scanline
{
unsigned short *delayptr = decoder->RawBayer16;
delayptr += bayer_pitch * (y-DEMOSAIC_DELAYLINES);
BayerRippleFilter(info->width,
delayptr, bayer_pitch, bayer_format, decoder->RawBayer16);
}
if(y>=DEMOSAIC_DELAYLINES)
{
int delay_y = y - DEMOSAIC_DELAYLINES;
unsigned short *sptr, scanline[8192*3];
outA8 = line;
line += pitch;
outB8 = line;
line += pitch;
sptr = scanline;
DebayerLine(info->width*2, info->height*2, delay_y*2,
decoder->RawBayer16, bayer_format, sptr, sharpening);
for(x=0; x<info->width*2; x++)
{
outA8[2] = *sptr++>>8;
outA8[1] = *sptr++>>8;
outA8[0] = *sptr++>>8;
outA8+=3;
}
for(x=0; x<info->width*2; x++)
{
outB8[2] = *sptr++>>8;
outB8[1] = *sptr++>>8;
outB8[0] = *sptr++>>8;
outB8+=3;
}
}
}*/
#endif // _THREADED
}
else
if(format == DECODED_FORMAT_BYR2 || format == DECODED_FORMAT_BYR4)
{
#if _THREADED
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
#else
assert(0) // old code disabled
/* {
int bayer_format = decoder->cfhddata.bayer_format;
// int stats1=0, stats2=0, statsd=0;
// double dstats1=0, dstats2=0, dstatsd=0;
line = output;
bayer_line = decoder->RawBayer16;
for(y=0; y<info->height; y++)
{
outA16 = (PIXEL16U *)line;
line += pitch;
outB16 = (PIXEL16U *)line;
line += pitch;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = BG + bayer_pitch/4;
for(x=0; x<info->width; x++)
{
int r,g,b,rg,bg,gd,g1,g2,y1,y2,u,v,dither;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
gd = (*GD++) - 32768;
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
g1 = g + gd;
g2 = g - gd; //TODO: Is there a DC offset to gd (causes a check in output )
// stats1+=g1;
// stats2+=g2;
// statsd+=gd;
if(r < 0) r = 0;
if(g1 < 0) g1 = 0;
if(g2 < 0) g2 = 0;
if(b < 0) b = 0;
if(r > 0xffff) r = 0xffff;
if(g1 > 0xffff) g1 = 0xffff;
if(g2 > 0xffff) g2 = 0xffff;
if(b > 0xffff) b = 0xffff;
switch(bayer_format)
{
case BAYER_FORMAT_RED_GRN: //Red-grn phase
*outA16++ = r;
*outA16++ = g1;
*outB16++ = g2;
*outB16++ = b;
break;
case BAYER_FORMAT_GRN_RED:// grn-red
*outA16++ = g1;
*outA16++ = r;
*outB16++ = b;
*outB16++ = g2;
break;
case BAYER_FORMAT_GRN_BLU:
*outA16++ = g1;
*outA16++ = b;
*outB16++ = r;
*outB16++ = g2;
break;
case BAYER_FORMAT_BLU_GRN:
*outA16++ = b;
*outA16++ = g1;
*outB16++ = g2;
*outB16++ = r;
break;
}
}
bayer_line += bayer_pitch;
}
if(decoder->flags & DECODER_FLAGS_HIGH_QUALITY)
{
int bayer_format = decoder->cfhddata.bayer_format;
for(y=2; y<info->height-3; y++)
{
int offset = pitch>>1;
line = output; //0
line += pitch * y * 2;
// If on a red line, move to a blue line
if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_RED_GRN)
line -= pitch;
{
int offset = pitch>>1;
outA16 = (PIXEL16U *)line;
outA16++; //g //for BAYER_FORMAT_RED_GRN input
outA16++; //b
outA16++; //g
outA16++; //b
//point to green pixel with *outA16
if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_GRN_BLU)
outA16++;
for(x=2; x<info->width-2; x++)
{
int mn,mx,g;
int range = 8*256; //1<<11
int shift = 11;
int delta;
int alpha;
g = *outA16;
// lines below do not need to be tested for a corrected value
mn = mx = outA16[offset+1];
if(mn > outA16[offset-1]) mn = outA16[offset-1];
if(mx < outA16[offset-1]) mx = outA16[offset-1];
if((outA16[-offset-1] & 1)==0)
{
if(mn > outA16[-offset-1]) mn = outA16[-offset-1];
if(mx < outA16[-offset-1]) mx = outA16[-offset-1];
}
if((outA16[-offset+1] & 1)==0)
{
if(mn > outA16[-offset+1]) mn = outA16[-offset+1];
if(mx < outA16[-offset+1]) mx = outA16[-offset+1];
}
delta = mx - mn;
if(delta < range && ((mn-range < g && g < mn) || (mx+range > g && g > mx)))
{
int gmn,gmx;
gmn = gmx = g;
if((outA16[-2*offset-2] & 1)==0)
{
if(gmn > outA16[-2*offset-2]) gmn = outA16[-2*offset-2];
if(gmx < outA16[-2*offset-2]) gmx = outA16[-2*offset-2];
}
if((outA16[-2*offset] & 1)==0)
{
if(gmn > outA16[-2*offset]) gmn = outA16[-2*offset];
if(gmx < outA16[-2*offset]) gmx = outA16[-2*offset];
}
if((outA16[-2*offset+2] & 1)==0)
{
if(gmn > outA16[-2*offset+2]) gmn = outA16[-2*offset+2];
if(gmx < outA16[-2*offset+2]) gmx = outA16[-2*offset+2];
}
if((outA16[-2] & 1)==0)
{
if(gmn > outA16[-2]) gmn = outA16[-2];
if(gmx < outA16[-2]) gmx = outA16[-2];
}
// lines below do not need to be tested for a corrected value
if(gmn > outA16[2*offset-2]) gmn = outA16[2*offset-2];
if(gmx < outA16[2*offset-2]) gmx = outA16[2*offset-2];
if(gmn > outA16[2*offset]) gmn = outA16[2*offset];
if(gmx < outA16[2*offset]) gmx = outA16[2*offset];
if(gmn > outA16[2*offset+2]) gmn = outA16[2*offset+2];
if(gmx < outA16[2*offset+2]) gmx = outA16[2*offset+2];
if(gmn > outA16[2]) gmn = outA16[2];
if(gmx < outA16[2]) gmx = outA16[2];
if((gmx - gmn) < range)
{
alpha = range;//delta;
if(g > mx)
{
alpha *= (g-mx); //max range
alpha >>= shift;
}
else // g < mn
{
alpha *= (mn-g); //max range
alpha >>= shift;
}
alpha *= alpha;
alpha >>= shift;
// avg = (outA16[-offset-1] + outA16[offset-1] + outA16[-offset+1] + outA16[offset+1] + 2) >> 2;
// *outA16 = avg; //good
// *outA16 = mn; //spotty
if( (abs(outA16[offset] - outA16[-offset]) < range)
&& ((abs(outA16[1] - outA16[-1]) < range)))
{
int val = (alpha*g + (range - alpha)*((mn+mx)>>1))>>shift;
if(val > 0xffff) val = 0xffff;
if(val < 0) val = 0;
val |= 1;
*outA16 = val;
// *outA16 = ((mn+mx)>>1) | 1; // like avg but less compute
}
}
}
outA16++; //g
outA16++; //b
}
}
}
}
}*/
#endif
}
// Pack the rows of Bayer data (full resolution progressive) into BYR3 format?
else if (format == DECODED_FORMAT_BYR3)
{
PIXEL16U *outR, *outG1, *outG2, *outB;
// int stats1=0, stats2=0, statsd=0;
// double dstats1=0, dstats2=0, dstatsd=0;
// #pragma omp parallel for
for(y=0; y<info->height; y++)
{
uint8_t *line = output;
PIXEL *bayerptr = (PIXEL *)decoder->RawBayer16;
line += pitch*2*y;
bayerptr += bayer_pitch * y;
outR = (PIXEL16U *)line;
outG1 = outR + (pitch/4);
outG2 = outR + (pitch/4)*2;
outB = outR + (pitch/4)*3;
G = (PIXEL16U *)bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = BG + bayer_pitch/4;
// Pack the rows of Bayer components into the BYR3 pattern
#if (1 && XMMOPT)
{
__m128i *G_128 = (__m128i *)G;
__m128i *RG_128 = (__m128i *)RG;
__m128i *BG_128 = (__m128i *)BG;
__m128i *GD_128 = (__m128i *)GD;
__m128i *outR_128 = (__m128i *)outR;
__m128i *outG1_128 = (__m128i *)outG1;
__m128i *outG2_128 = (__m128i *)outG2;
__m128i *outB_128 = (__m128i *)outB;
__m128i limiter = _mm_set1_epi16(0x7fff - 0x3ff);
__m128i midpoint1 = _mm_set1_epi16(32768>>6);
__m128i midpoint2 = _mm_set1_epi16(32768>>5);
int column_step = 8;
int post_column = (info->width) - ((info->width) % column_step);
for (x=0; x < post_column; x += column_step)
{
__m128i r_128;
__m128i g1_128;
__m128i g2_128;
__m128i b_128;
__m128i g_128;
__m128i rg_128;
__m128i bg_128;
__m128i gd_128;
g_128 = _mm_load_si128(G_128++);
rg_128 = _mm_load_si128(RG_128++);
bg_128 = _mm_load_si128(BG_128++);
gd_128 = _mm_load_si128(GD_128++);
g_128 = _mm_srli_epi16(g_128, 6);
rg_128 = _mm_srli_epi16(rg_128, 5);
bg_128 = _mm_srli_epi16(bg_128, 5);
gd_128 = _mm_srli_epi16(gd_128, 6);
gd_128 = _mm_subs_epi16(gd_128, midpoint1);
rg_128 = _mm_subs_epi16(rg_128, midpoint2);
bg_128 = _mm_subs_epi16(bg_128, midpoint2);
r_128 = _mm_adds_epi16(rg_128, g_128);
b_128 = _mm_adds_epi16(bg_128, g_128);
g1_128 = _mm_adds_epi16(g_128, gd_128);
g2_128 = _mm_subs_epi16(g_128, gd_128);
r_128 = _mm_adds_epi16(r_128, limiter);
r_128 = _mm_subs_epu16(r_128, limiter);
g1_128 = _mm_adds_epi16(g1_128, limiter);
g1_128 = _mm_subs_epu16(g1_128, limiter);
g2_128 = _mm_adds_epi16(g2_128, limiter);
g2_128 = _mm_subs_epu16(g2_128, limiter);
b_128 = _mm_adds_epi16(b_128, limiter);
b_128 = _mm_subs_epu16(b_128, limiter);
_mm_store_si128(outR_128++, r_128);
_mm_store_si128(outG1_128++, g1_128);
_mm_store_si128(outG2_128++, g2_128);
_mm_store_si128(outB_128++, b_128);
}
G = (PIXEL16U *)G_128;
RG = (PIXEL16U *)RG_128;
BG = (PIXEL16U *)BG_128;
GD = (PIXEL16U *)GD_128;
outR = (PIXEL16U *)outR_128;
outG1 = (PIXEL16U *)outG1_128;
outG2 = (PIXEL16U *)outG2_128;
outB = (PIXEL16U *)outB_128;
}
#endif
for(; x<info->width; x++)
{
int r,g,b,rg,bg,gd,g1,g2;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
gd = (*GD++) - 32768;
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
g1 = g + gd;
g2 = g - gd; //TODO: Is there a DC offset to gd (causes a check in output )
if(r < 0) r = 0;
if(g1 < 0) g1 = 0;
if(g2 < 0) g2 = 0;
if(b < 0) b = 0;
if(r > 0xffff) r = 0xffff;
if(g1 > 0xffff) g1 = 0xffff;
if(g2 > 0xffff) g2 = 0xffff;
if(b > 0xffff) b = 0xffff;
//Red-grn phase
*outR++ = r>>6;
*outG1++ = g1>>6;
*outG2++ = g2>>6;
*outB++ = b>>6;
}
}
}
// Pack the rows of Bayer data (full resolution progressive) into BYR4 format?
else if (format == DECODED_FORMAT_BYR4)
{
int bayer_format = decoder->cfhddata.bayer_format;
line = output;
bayer_line = decoder->RawBayer16;
for(y=0; y<info->height; y++)
{
outA16 = (PIXEL16U *)line;
line += pitch;
outB16 = (PIXEL16U *)line;
line += pitch;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = BG + bayer_pitch/4;
for(x=0; x<info->width; x++)
{
//int r,g,b,rg,bg,gd,g1,g2,y1,y2,u,v,dither;
int32_t r, g, b, rg, bg, gd, g1, g2;
// The output of the inverse transform is unsigned 16-bit integers
const int midpoint = 32768;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
gd = (*GD++) - midpoint;
r = ((rg - midpoint)<<1) + g;
b = ((bg - midpoint)<<1) + g;
g1 = g + gd;
g2 = g - gd;
r = SATURATE_16U(r);
g1 = SATURATE_16U(g1);
g2 = SATURATE_16U(g2);
b = SATURATE_16U(b);
// stats1+=g1;
// stats2+=g2;
// statsd+=gd;
switch(bayer_format)
{
case BAYER_FORMAT_RED_GRN: //Red-grn phase
*outA16++ = r;
*outA16++ = g1;
*outB16++ = g2;
*outB16++ = b;
break;
case BAYER_FORMAT_GRN_RED:// grn-red
*outA16++ = g1;
*outA16++ = r;
*outB16++ = b;
*outB16++ = g2;
break;
case BAYER_FORMAT_GRN_BLU:
*outA16++ = g1;
*outA16++ = b;
*outB16++ = r;
*outB16++ = g2;
break;
case BAYER_FORMAT_BLU_GRN:
*outA16++ = b;
*outA16++ = g1;
*outB16++ = g2;
*outB16++ = r;
break;
default:
// Unsupported Bayer format
assert(0);
*outA16++ = 0;
*outA16++ = 0;
*outB16++ = 0;
*outB16++ = 0;
break;
}
}
bayer_line += bayer_pitch;
}
if(decoder->flags & DECODER_FLAGS_HIGH_QUALITY)
{
for(y=2; y<info->height-3; y++)
{
//int offset = pitch>>1;
line = output; //0
line += pitch * y * 2;
// If on a red line, move to a blue line
if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_RED_GRN)
line -= pitch;
{
int offset = pitch>>1;
outA16 = (PIXEL16U *)line;
outA16++; //g //for BAYER_FORMAT_RED_GRN input
outA16++; //b
outA16++; //g
outA16++; //b
//point to green pixel with *outA16
if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_GRN_BLU)
outA16++;
for(x=2; x<info->width-2; x++)
{
int mn,mx,g;
int range = 8*256; //1<<11
int shift = 11;
int delta;
int alpha;
g = *outA16;
// lines below do not need to be tested for a corrected value
mn = mx = outA16[offset+1];
if(mn > outA16[offset-1]) mn = outA16[offset-1];
if(mx < outA16[offset-1]) mx = outA16[offset-1];
if((outA16[-offset-1] & 1)==0)
{
if(mn > outA16[-offset-1]) mn = outA16[-offset-1];
if(mx < outA16[-offset-1]) mx = outA16[-offset-1];
}
if((outA16[-offset+1] & 1)==0)
{
if(mn > outA16[-offset+1]) mn = outA16[-offset+1];
if(mx < outA16[-offset+1]) mx = outA16[-offset+1];
}
delta = mx - mn;
if(delta < range && ((mn-range < g && g < mn) || (mx+range > g && g > mx)))
{
int gmn,gmx;
gmn = gmx = g;
if((outA16[-2*offset-2] & 1)==0)
{
if(gmn > outA16[-2*offset-2]) gmn = outA16[-2*offset-2];
if(gmx < outA16[-2*offset-2]) gmx = outA16[-2*offset-2];
}
if((outA16[-2*offset] & 1)==0)
{
if(gmn > outA16[-2*offset]) gmn = outA16[-2*offset];
if(gmx < outA16[-2*offset]) gmx = outA16[-2*offset];
}
if((outA16[-2*offset+2] & 1)==0)
{
if(gmn > outA16[-2*offset+2]) gmn = outA16[-2*offset+2];
if(gmx < outA16[-2*offset+2]) gmx = outA16[-2*offset+2];
}
if((outA16[-2] & 1)==0)
{
if(gmn > outA16[-2]) gmn = outA16[-2];
if(gmx < outA16[-2]) gmx = outA16[-2];
}
// lines below do not need to be tested for a corrected value
if(gmn > outA16[2*offset-2]) gmn = outA16[2*offset-2];
if(gmx < outA16[2*offset-2]) gmx = outA16[2*offset-2];
if(gmn > outA16[2*offset]) gmn = outA16[2*offset];
if(gmx < outA16[2*offset]) gmx = outA16[2*offset];
if(gmn > outA16[2*offset+2]) gmn = outA16[2*offset+2];
if(gmx < outA16[2*offset+2]) gmx = outA16[2*offset+2];
if(gmn > outA16[2]) gmn = outA16[2];
if(gmx < outA16[2]) gmx = outA16[2];
if((gmx - gmn) < range)
{
alpha = range;//delta;
if(g > mx)
{
alpha *= (g-mx); //max range
alpha >>= shift;
}
else // g < mn
{
alpha *= (mn-g); //max range
alpha >>= shift;
}
alpha *= alpha;
alpha >>= shift;
// avg = (outA16[-offset-1] + outA16[offset-1] + outA16[-offset+1] + outA16[offset+1] + 2) >> 2;
// *outA16 = avg; //good
// *outA16 = mn; //spotty
if( (abs(outA16[offset] - outA16[-offset]) < range)
&& ((abs(outA16[1] - outA16[-1]) < range)))
{
int val = (alpha*g + (range - alpha)*((mn+mx)>>1))>>shift;
if(val > 0xffff) val = 0xffff;
if(val < 0) val = 0;
val |= 1;
*outA16 = val;
// *outA16 = ((mn+mx)>>1) | 1; // like avg but less compute
}
}
}
outA16++; //g
outA16++; //b
}
}
}
}
// Linear restore
{
unsigned short *buff = (unsigned short *)output;
//static int pos = 0;
for(y=0; y<info->height*2; y++)
{
for(x=0; x<info->width*2; x++)
{
float val = (float)buff[y*info->width*2 + x]/65535.0f;
float encode_curvebase = 90.0;
int encode_curve_type = CURVE_TYPE_LOG;
int encode_curve_neg;
if((decoder->cfhddata.encode_curve)>>16) //1 or 2
{
encode_curve_type = (decoder->cfhddata.encode_curve)>>16;
if(encode_curve_type & CURVE_TYPE_EXTENDED)
encode_curvebase = (float)(decoder->cfhddata.encode_curve & 0xffff); // use all 16-bits for larger log bases
else
encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff);
}
if(encode_curvebase == 1.0 && encode_curve_type <= CURVE_TYPE_LINEAR)
encode_curve_type = CURVE_TYPE_LINEAR;
encode_curve_neg = encode_curve_type & CURVE_TYPE_NEGATIVE;
switch(encode_curve_type & CURVE_TYPE_MASK)
{
case CURVE_TYPE_LOG:
val = CURVE_LOG2LIN(val,encode_curvebase);
break;
case CURVE_TYPE_GAMMA:
val = CURVE_GAM2LIN(val,encode_curvebase);
break;
case CURVE_TYPE_CINEON:
val = CURVE_CINEON2LIN(val,encode_curvebase);
break;
case CURVE_TYPE_CINE985:
val = CURVE_CINE9852LIN(val,encode_curvebase);
break;
case CURVE_TYPE_PARA:
val = CURVE_PARA2LIN(val,(int)((decoder->cfhddata.encode_curve >> 8) & 0xff), (int)(decoder->cfhddata.encode_curve & 0xff));
break;
case CURVE_TYPE_CSTYLE:
val = CURVE_CSTYLE2LIN((float)val,(int)((decoder->cfhddata.encode_curve >> 8) & 0xff));
break;
case CURVE_TYPE_SLOG:
val = CURVE_SLOG2LIN((float)val);
break;
case CURVE_TYPE_LOGC:
val = CURVE_LOGC2LIN((float)val);
break;
case CURVE_TYPE_LINEAR:
default:
break;
}
buff[y*info->width*2 + x] = (int)(val*4095.0);
}
}
}
}
else
{
#if _THREADED
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
#else
//unsigned short scanline[8192*3],*sptr;
//unsigned short scanline2[8192*3],*sptr2;
unsigned short *scanline,*sptr;
unsigned short *scanline2,*sptr2;
char *buffer = decoder->scratch.free_ptr;
size_t buffer_size = decoder->scratch.free_size;
uint8_t *outyuv,*line = output;
PIXEL *bayerptr;
int x,y;
if(buffer_size < info->width * 2 * 3 * 2)
assert(0); // not enough memory
scanline = (unsigned short *)buffer;
buffer += info->width * 2 * 3;
scanline2 = (unsigned short *)buffer;
line = output;
bayer_line = decoder->RawBayer16;
for(y=0; y<info->height; y++)
{
int r,g,b,rg,bg,y1,y2,u,v;
int r1,g1,b1;
int i;
__m128i gggggggg,ggggggg2,rgrgrgrg,bgbgbgbg;
__m128i rrrrrrrr,bbbbbbbb;
__m128i mid8192 = _mm_set1_epi16(8192);
__m128i mid16384 = _mm_set1_epi16(16384);
__m128i mid32768 = _mm_set1_epi16(32768);
__m128i overflowprotectRGB_epi16 = _mm_set1_epi16(0x7fff-0x3fff);
int sse2width = info->width & 0xfff8;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = BG + bayer_pitch/4;
sptr = scanline;
x = 0;
for(; x<sse2width; x+=8)
{
gggggggg = _mm_loadu_si128((__m128i *)G); G+=8;
rgrgrgrg = _mm_loadu_si128((__m128i *)RG); RG+=8;
bgbgbgbg = _mm_loadu_si128((__m128i *)BG); BG+=8;
ggggggg2 = _mm_srli_epi16(gggggggg, 2);// 0-16383 14bit unsigned
rgrgrgrg = _mm_srli_epi16(rgrgrgrg, 2);// 14bit unsigned
bgbgbgbg = _mm_srli_epi16(bgbgbgbg, 2);// 14bit unsigned
rrrrrrrr = _mm_subs_epi16(rgrgrgrg, mid8192);// -8191 to 8191 14bit signed
rrrrrrrr = _mm_slli_epi16(rrrrrrrr, 1); // -16382 to 16382 15bit signed
rrrrrrrr = _mm_adds_epi16(rrrrrrrr, ggggggg2); // -16382 to 32767
bbbbbbbb = _mm_subs_epi16(bgbgbgbg, mid8192);// -8191 to 8191 14bit signed
bbbbbbbb = _mm_slli_epi16(bbbbbbbb, 1); // -16382 to 16382 15bit signed
bbbbbbbb = _mm_adds_epi16(bbbbbbbb, ggggggg2); // -16382 to 32767
//limit to 0 to 16383
rrrrrrrr = _mm_adds_epi16(rrrrrrrr, overflowprotectRGB_epi16);
rrrrrrrr = _mm_subs_epu16(rrrrrrrr, overflowprotectRGB_epi16);
//limit to 0 to 16383
bbbbbbbb = _mm_adds_epi16(bbbbbbbb, overflowprotectRGB_epi16);
bbbbbbbb = _mm_subs_epu16(bbbbbbbb, overflowprotectRGB_epi16);
rrrrrrrr = _mm_slli_epi16(rrrrrrrr, 2); // restore to 0 to 65535
bbbbbbbb = _mm_slli_epi16(bbbbbbbb, 2); // restore to 0 to 65535
*sptr++ = _mm_extract_epi16(rrrrrrrr, 0);
*sptr++ = _mm_extract_epi16(gggggggg, 0);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 0);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 1);
*sptr++ = _mm_extract_epi16(gggggggg, 1);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 1);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 2);
*sptr++ = _mm_extract_epi16(gggggggg, 2);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 2);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 3);
*sptr++ = _mm_extract_epi16(gggggggg, 3);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 3);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 4);
*sptr++ = _mm_extract_epi16(gggggggg, 4);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 4);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 5);
*sptr++ = _mm_extract_epi16(gggggggg, 5);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 5);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 6);
*sptr++ = _mm_extract_epi16(gggggggg, 6);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 6);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 7);
*sptr++ = _mm_extract_epi16(gggggggg, 7);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 7);
}
for(; x<info->width; x++)
{
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
if(r < 0) r = 0; if(r > 0xffff) r = 0xffff;
if(g < 0) g = 0; if(g > 0xffff) g = 0xffff;
if(b < 0) b = 0; if(b > 0xffff) b = 0xffff;
*sptr++ = r;
*sptr++ = g;
*sptr++ = b;
}
{
int flags = 0;
int whitebitdepth = 16;
sptr = scanline;
if(decoder->apply_color_active_metadata)
sptr = ApplyActiveMetaData(decoder, info->width, 1, y, scanline, scanline2,
info->format, &whitebitdepth, &flags);
ConvertLinesToOutput(decoder, info->width, 1, sptr, line, pitch,
info->format, whitebitdepth, flags);
}
line += pitch;
bayer_line += bayer_pitch;
}
#endif
}
/* // switch to using the ApplyActiveMetaData() and ConvertLinesToOutput() calls - DAN20071201
// Pack the rows of Bayer data (full resolution progressive) into BYR2 format?
else if (format == DECODED_FORMAT_YUYV)
{
line = output;
bayer_line = decoder->RawBayer16;
scale = 256.0;
y_rmult = ((rgb2yuv[0][0]) * scale);
y_gmult = ((rgb2yuv[0][1]) * scale);
y_bmult = ((rgb2yuv[0][2]) * scale);
y_offset= ((rgb2yuv[0][3]) * scale);
u_rmult = ((rgb2yuv[1][0]) * scale);
u_gmult = ((rgb2yuv[1][1]) * scale);
u_bmult = ((rgb2yuv[1][2]) * scale);
u_offset= ((rgb2yuv[1][3]) * scale);
v_rmult = ((rgb2yuv[2][0]) * scale);
v_gmult = ((rgb2yuv[2][1]) * scale);
v_bmult = ((rgb2yuv[2][2]) * scale);
v_offset= ((rgb2yuv[2][3]) * scale);
r_rmult= (mtrx[0][0] * scale * whitebalance[0]);
r_gmult= (mtrx[0][1] * scale * whitebalance[1]);
r_bmult= (mtrx[0][2] * scale * whitebalance[2]);
r_offset= (mtrx[0][3] * scale);
g_rmult= (mtrx[1][0] * scale * whitebalance[0]);
g_gmult= (mtrx[1][1] * scale * whitebalance[1]);
g_bmult= (mtrx[1][2] * scale * whitebalance[2]);
g_offset= (mtrx[1][3] * scale);
b_rmult= (mtrx[2][0] * scale * whitebalance[0]);
b_gmult= (mtrx[2][1] * scale * whitebalance[1]);
b_bmult= (mtrx[2][2] * scale * whitebalance[2]);
b_offset= (mtrx[2][3] * scale);
for(y=0; y<info->height; y++)
{
outyuv = line;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
for(x=0; x<info->width; x+=2)
{
int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v,dither;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
// dither = (rand() & 65535)<<1;
if(matrix_non_unity)
{
//TODO : need on convert to linear first.
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>8);
g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>8);
b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>8);
//TODO : need on convert back to log/display curve.
if(r1 < 0) r1 = 0;
if(r1 > 65535) r1 = 65535;
if(g1 < 0) g1 = 0;
if(g1 > 65535) g1 = 65535;
if(b1 < 0) b1 = 0;
if(b1 > 65535) b1 = 65535;
}
else
{
r1 = r;
g1 = g;
b1 = b;
}
y1= ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16;
u = (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16;
v = ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
// dither = (rand() & 65535)<<1;
if(matrix_non_unity)
{
//TODO : need on convert to linear first.
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>8);
g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>8);
b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>8);
//TODO : need on convert back to log/display curve.
if(r1 < 0) r1 = 0;
if(r1 > 65535) r1 = 65535;
if(g1 < 0) g1 = 0;
if(g1 > 65535) g1 = 65535;
if(b1 < 0) b1 = 0;
if(b1 > 65535) b1 = 65535;
}
else
{
r1 = r;
g1 = g;
b1 = b;
}
y2 = ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16;
u += (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16;
v += ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16;
u >>= 1;
v >>= 1;
y1 += y_offset;
y2 += y_offset;
u += u_offset;
v += v_offset;
if(y1 < 0) y1 = 0;
if(y1 > 255) y1 = 255;
if(y2 < 0) y2 = 0;
if(y2 > 255) y2 = 255;
if(u < 0) u = 0;
if(u > 255) u = 255;
if(v < 0) v = 0;
if(v > 255) v = 255;
*outyuv++ = y1;
*outyuv++ = u;
*outyuv++ = y2;
*outyuv++ = v;
}
line += pitch;
bayer_line += bayer_pitch;
}
}
else if (format == DECODED_FORMAT_YU64)
{
int shift = 14;
PIXEL16U *outyuv64;
line = output;
bayer_line = decoder->RawBayer16;
scale = 16384.0;
//_mm_empty(); // Clear the mmx register state
y_rmult = ((rgb2yuv[0][0]) * scale);
y_gmult = ((rgb2yuv[0][1]) * scale);
y_bmult = ((rgb2yuv[0][2]) * scale);
y_offset= ((rgb2yuv[0][3]) * scale * 4.0);
u_rmult = ((rgb2yuv[1][0]) * scale);
u_gmult = ((rgb2yuv[1][1]) * scale);
u_bmult = ((rgb2yuv[1][2]) * scale);
u_offset= ((rgb2yuv[1][3]) * scale * 4.0);
v_rmult = ((rgb2yuv[2][0]) * scale);
v_gmult = ((rgb2yuv[2][1]) * scale);
v_bmult = ((rgb2yuv[2][2]) * scale);
v_offset= ((rgb2yuv[2][3]) * scale * 4.0);
scale = 4096.0;
r_rmult= (mtrx[0][0] * scale * whitebalance[0]);
r_gmult= (mtrx[0][1] * scale * whitebalance[1]);
r_bmult= (mtrx[0][2] * scale * whitebalance[2]);
r_offset= (mtrx[0][3] * scale);
g_rmult= (mtrx[1][0] * scale * whitebalance[0]);
g_gmult= (mtrx[1][1] * scale * whitebalance[1]);
g_bmult= (mtrx[1][2] * scale * whitebalance[2]);
g_offset= (mtrx[1][3] * scale);
b_rmult= (mtrx[2][0] * scale * whitebalance[0]);
b_gmult= (mtrx[2][1] * scale * whitebalance[1]);
b_bmult= (mtrx[2][2] * scale * whitebalance[2]);
b_offset= (mtrx[2][3] * scale);
y_offset += 26;
u_offset += 26;
v_offset += 26;
for(y=0; y<info->height; y++)
{
outyuv64 = (PIXEL16U *)line;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
for(x=0; x<info->width; x+=2)
{
int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v,dither;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
// dither = (rand() & 65535)<<1;
if(matrix_non_unity)
{
//TODO : need on convert to linear first.
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>12);
g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>12);
b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>12);
//TODO : need on convert back to log/display curve.
if(r1 < 0) r1 = 0;
if(r1 > 65535) r1 = 65535;
if(g1 < 0) g1 = 0;
if(g1 > 65535) g1 = 65535;
if(b1 < 0) b1 = 0;
if(b1 > 65535) b1 = 65535;
}
else
{
r1 = r;
g1 = g;
b1 = b;
}
y1= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset;
u = (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift);
v = (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift);
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
// dither = (rand() & 65535)<<1;
if(matrix_non_unity)
{
//TODO : need on convert to linear first.
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>12);
g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>12);
b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>12);
//TODO : need on convert back to log/display curve.
if(r1 < 0) r1 = 0;
if(r1 > 65535) r1 = 65535;
if(g1 < 0) g1 = 0;
if(g1 > 65535) g1 = 65535;
if(b1 < 0) b1 = 0;
if(b1 > 65535) b1 = 65535;
}
else
{
r1 = r;
g1 = g;
b1 = b;
}
y2= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset;
u+= (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift);
v+= (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift);
u >>= 1;
v >>= 1;
u += u_offset;
v += v_offset;
if(y1 < 0) y1 = 0;
if(y1 > 65535) y1 = 65535;
if(y2 < 0) y2 = 0;
if(y2 > 65535) y2 = 65535;
if(u < 0) u = 0;
if(u > 65535) u = 65535;
if(v < 0) v = 0;
if(v > 65535) v = 65535;
*outyuv64++ = y1;
*outyuv64++ = v;
*outyuv64++ = y2;
*outyuv64++ = u;
}
line += pitch;
bayer_line += bayer_pitch;
}
}
else //RGBs
{
line = output;
bayer_line = decoder->RawBayer16;
scale = 256.0;
r_rmult = (mtrx[0][0]) * scale * whitebalance[0];
r_gmult = (mtrx[0][1]) * scale * whitebalance[1];
r_bmult = (mtrx[0][2]) * scale * whitebalance[2];
r_offset= (mtrx[0][3]) * scale;
g_rmult = (mtrx[1][0]) * scale * whitebalance[0];
g_gmult = (mtrx[1][1]) * scale * whitebalance[1];
g_bmult = (mtrx[1][2]) * scale * whitebalance[2];
g_offset= (mtrx[1][3]) * scale;
b_rmult = (mtrx[2][0]) * scale * whitebalance[0];
b_gmult = (mtrx[2][1]) * scale * whitebalance[1];
b_bmult = (mtrx[2][2]) * scale * whitebalance[2];
b_offset= (mtrx[2][3]) * scale;
for(y=0; y<info->height; y++)
{
int i,noisearray[32];
outyuv = line;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = RG + bayer_pitch/4;
for(i=0; i<32; i++)
{
noisearray[i] = (rand() & 127);
}
if(info->format == DECODED_FORMAT_RGB32)
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
int rnd = noisearray[x&31];
// *ptr++ = *bayerptr++ >> 8;
// *ptr++ = 0x80;
// *ptr++ = *bayerptr++ >> 8;
// *ptr++ = 0x80;
int r,g,b,g1,g2,gdiff,y1,y2,u,v;
// g = (g1+g2)>>1;
// *g_row_ptr++ = g;
// *rg_row_ptr++ = (r-g+256)>>1;
// *bg_row_ptr++ = (b-g+256)>>1;
// *gdiff_row_ptr++ = (g1-g2+256)>>1;
g = ((*G++)>>1);
r = ((*RG++ + 64)>>0)-(256<<7)+g;
b = ((*BG++ + 64)>>0)-(256<<7)+g;
// gdiff = ((*GD++ + 64)>>7)-256+g;
if(matrix_non_unity)
{
//TODO : need on convert to linear first.
R1 = ((r*r_rmult + g*r_gmult + b*r_bmult + r_offset)>>8) + rnd;
G1 = ((r*g_rmult + g*g_gmult + b*g_bmult + g_offset)>>8) + rnd;
B1 = ((r*b_rmult + g*b_gmult + b*b_bmult + b_offset)>>8) + rnd;
//TODO : need on convert back to log/display curve.
}
else
{
R1 = r + rnd;
G1 = g + rnd;
B1 = b + rnd;
}
R1 >>= 7;
G1 >>= 7;
B1 >>= 7;
if(R1 < 0) R1 = 0;
if(R1 > 255) R1 = 255;
if(G1 < 0) G1 = 0;
if(G1 > 255) G1 = 255;
if(B1 < 0) B1 = 0;
if(B1 > 255) B1 = 255;
*outyuv++ = B1;
*outyuv++ = G1;
*outyuv++ = R1;
*outyuv++ = 255;
}
}
else
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
int rnd = noisearray[x&31];
// *ptr++ = *bayerptr++ >> 8;
// *ptr++ = 0x80;
// *ptr++ = *bayerptr++ >> 8;
// *ptr++ = 0x80;
int r,g,b,g1,g2,gdiff,y1,y2,u,v;
//g = (g1+g2)>>1;
// *g_row_ptr++ = g;
// *rg_row_ptr++ = (r-g+256)>>1;
// *bg_row_ptr++ = (b-g+256)>>1;
// *gdiff_row_ptr++ = (g1-g2+256)>>1;
g = ((*G++)>>1);
r = ((*RG++ + 64)>>0)-(256<<7)+g;
b = ((*BG++ + 64)>>0)-(256<<7)+g;
// gdiff = ((*GD++ + 64)>>7)-256+g;
if(matrix_non_unity)
{
//TODO: Need to convert to linear first.
R1 = ((r*r_rmult + g*r_gmult + b*r_bmult + r_offset)>>8) + rnd;
G1 = ((r*g_rmult + g*g_gmult + b*g_bmult + g_offset)>>8) + rnd;
B1 = ((r*b_rmult + g*b_gmult + b*b_bmult + b_offset)>>8) + rnd;
//TODO: Need to convert back to log/display curve.
}
else
{
R1 = r + rnd;
G1 = g + rnd;
B1 = b + rnd;
}
R1 >>= 7;
G1 >>= 7;
B1 >>= 7;
if(R1 < 0) R1 = 0;
if(R1 > 255) R1 = 255;
if(G1 < 0) G1 = 0;
if(G1 > 255) G1 = 255;
if(B1 < 0) B1 = 0;
if(B1 > 255) B1 = 255;
*outyuv++ = B1;
*outyuv++ = G1;
*outyuv++ = R1;
}
}
line += pitch;
bayer_line += bayer_pitch;
}
}
*/
//MEMORY_ALIGNED_FREE(RawBayer16);
}
}
else
if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
int precision = codec->precision;
if(decoder->RawBayer16 == NULL)
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
size_t size = info->width*info->height*num_channels*sizeof(PIXEL);
decoder->RawBayer16 =
(PIXEL16U *)AllocAligned(allocator, size, 16);
#else
decoder->RawBayer16 =
(PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*info->height*num_channels*sizeof(PIXEL), 16);
#endif
decoder->RawBayerSize = info->width*info->height*num_channels*sizeof(PIXEL);
}
//#ifdef SHARPENING
if(decoder->RGBFilterBuffer16 == NULL)
{
int frame_size = info->width*decoded_height*4*3*sizeof(PIXEL);
if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
frame_size = info->width*decoded_height*4*4*sizeof(PIXEL);
#if _ALLOCATOR
{
ALLOCATOR *allocator = decoder->allocator;
decoder->RGBFilterBuffer16 =
(PIXEL16U *)AllocAligned(allocator, frame_size, 16);
}
#else
decoder->RGBFilterBuffer16 =
(PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16);
#endif
decoder->RGBFilterBufferSize = frame_size;
}
//#endif
if(decoder->RawBayer16 == NULL || decoder->RGBFilterBuffer16 == NULL)
{
decoder->error = CODEC_ERROR_MEMORY_ALLOC;
return;
}
//TODO: Replace this memory allocation with a scratch buffer allocation
if(decoder->RawBayer16)
{
uint8_t *outyuv,*line, *source_line;
PIXEL16U *bayerptr;
PIXEL16U *G,*RG,*BG;
int x,y;
int src_pitch = info->width*num_channels*sizeof(PIXEL);
int y_rmult,y_gmult,y_bmult,y_offset;//shift=8;
int u_rmult,u_gmult,u_bmult,u_offset;
int v_rmult,v_gmult,v_bmult,v_offset;
float scale = 256.0;
//int matrix_non_unity = 0;
//int wb_non_unity = 0;
//float curve2lin[2048];
//float lin2curve[2048+512+2];
static float rgb2yuv[3][4] =
{
{0.183f, 0.614f, 0.062f, 16.0f/256.0f},
{-0.101f,-0.338f, 0.439f, 0.5f},
{0.439f,-0.399f,-0.040f, 0.5}
};
#if _THREADED
TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
(uint8_t *)decoder->RawBayer16, src_pitch,
info, chroma_offset, precision);
#else
TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
decoder->RawBayer16, src_pitch, info,
&decoder->scratch, chroma_offset, precision);
#endif
if (format == DECODED_FORMAT_YUYV)
{
line = output;
source_line = (unsigned char *)decoder->RawBayer16;
scale = 256.0;
y_rmult = (int)((rgb2yuv[0][0]));
y_gmult = (int)((rgb2yuv[0][1]));
y_bmult = (int)((rgb2yuv[0][2]));
y_offset= (int)((rgb2yuv[0][3]));
u_rmult = (int)((rgb2yuv[1][0]));
u_gmult = (int)((rgb2yuv[1][1]));
u_bmult = (int)((rgb2yuv[1][2]));
u_offset= (int)((rgb2yuv[1][3]));
v_rmult = (int)((rgb2yuv[2][0]));
v_gmult = (int)((rgb2yuv[2][1]));
v_bmult = (int)((rgb2yuv[2][2]));
v_offset= (int)((rgb2yuv[2][3]));
for(y=0; y<info->height; y++)
{
outyuv = line;
bayerptr = (PIXEL16U *)source_line;
G = bayerptr;
RG = G + src_pitch/(2*num_channels);
BG = RG + src_pitch/(2*num_channels);
for(x=0; x<info->width; x+=2)
{
int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
r1 = r;
g1 = g;
b1 = b;
y1= ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16;
u = (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16;
v = ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
r1 = r;
g1 = g;
b1 = b;
y2 = ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16;
u += (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16;
v += ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16;
u >>= 1;
v >>= 1;
y1 += y_offset;
y2 += y_offset;
u += u_offset;
v += v_offset;
if(y1 < 0) y1 = 0;
if(y1 > 255) y1 = 255;
if(y2 < 0) y2 = 0;
if(y2 > 255) y2 = 255;
if(u < 0) u = 0;
if(u > 255) u = 255;
if(v < 0) v = 0;
if(v > 255) v = 255;
*outyuv++ = y1;
*outyuv++ = u;
*outyuv++ = y2;
*outyuv++ = v;
}
line += pitch;
source_line += src_pitch;
}
}
else if (format == DECODED_FORMAT_YU64)
{
int shift = 14;
PIXEL16U *outyuv64;
line = output;
source_line = (unsigned char *)decoder->RawBayer16;
scale = 16384.0;
y_rmult = (int)((rgb2yuv[0][0]) * scale);
y_gmult = (int)((rgb2yuv[0][1]) * scale);
y_bmult = (int)((rgb2yuv[0][2]) * scale);
y_offset= (int)((rgb2yuv[0][3]) * scale * 4.0f);
u_rmult = (int)((rgb2yuv[1][0]) * scale);
u_gmult = (int)((rgb2yuv[1][1]) * scale);
u_bmult = (int)((rgb2yuv[1][2]) * scale);
u_offset= (int)((rgb2yuv[1][3]) * scale * 4.0f);
v_rmult = (int)((rgb2yuv[2][0]) * scale);
v_gmult = (int)((rgb2yuv[2][1]) * scale);
v_bmult = (int)((rgb2yuv[2][2]) * scale);
v_offset= (int)((rgb2yuv[2][3]) * scale * 4.0f);
scale = 4096.0;
y_offset += 26;
u_offset += 26;
v_offset += 26;
for(y=0; y<info->height; y++)
{
outyuv64 = (PIXEL16U *)line;
bayerptr = (PIXEL16U *)source_line;
G = bayerptr;
RG = G + src_pitch/(2*num_channels);
BG = RG + src_pitch/(2*num_channels);
for(x=0; x<info->width; x+=2)
{
int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
r1 = r;
g1 = g;
b1 = b;
y1= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset;
u = (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift);
v = (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift);
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
r1 = r;
g1 = g;
b1 = b;
y2= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset;
u+= (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift);
v+= (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift);
u >>= 1;
v >>= 1;
u += u_offset;
v += v_offset;
if(y1 < 0) y1 = 0;
if(y1 > 65535) y1 = 65535;
if(y2 < 0) y2 = 0;
if(y2 > 65535) y2 = 65535;
if(u < 0) u = 0;
if(u > 65535) u = 65535;
if(v < 0) v = 0;
if(v > 65535) v = 65535;
*outyuv64++ = y1;
*outyuv64++ = v;
*outyuv64++ = y2;
*outyuv64++ = u;
}
line += pitch;
source_line += src_pitch;
}
}
else //RGBs
{
line = output;
source_line = (unsigned char *)decoder->RawBayer16;
for(y=0; y<info->height; y++)
{
int i,noisearray[32];
unsigned short *rgb16 = (unsigned short *)line;
outyuv = line;
bayerptr = (PIXEL16U *)source_line;
G = bayerptr;
RG = G + src_pitch/(2*num_channels);
BG = RG + src_pitch/(2*num_channels);
for(i=0; i<32; i++)
{
noisearray[i] = (rand() & 255);
}
if(info->format == DECODED_FORMAT_RGB32)
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
int rnd = noisearray[x&31];
#if 0
G1 = (*G++) + rnd;
R1 = ((*RG++<<1) - (128<<9)) + G1;
B1 = ((*BG++<<1) - (128<<9)) + G1;
#else
G1 = (*G++) + rnd;
R1 = (*RG++) + rnd;
B1 = (*BG++) + rnd;
#endif
R1 >>= 8;
G1 >>= 8;
B1 >>= 8;
if(R1 < 0) R1 = 0;
if(R1 > 255) R1 = 255;
if(G1 < 0) G1 = 0;
if(G1 > 255) G1 = 255;
if(B1 < 0) B1 = 0;
if(B1 > 255) B1 = 255;
*outyuv++ = B1;
*outyuv++ = G1;
*outyuv++ = R1;
*outyuv++ = 255;
}
}
else if(info->format == DECODED_FORMAT_RGB24)
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
int rnd = noisearray[x&31];
#if 0
G1 = (*G++) + rnd;
R1 = ((*RG++<<1) - (128<<9)) + G1;
B1 = ((*BG++<<1) - (128<<9)) + G1;
#else
G1 = (*G++) + rnd;
R1 = (*RG++) + rnd;
B1 = (*BG++) + rnd;
#endif
R1 >>= 8;
G1 >>= 8;
B1 >>= 8;
if(R1 < 0) R1 = 0;
if(R1 > 255) R1 = 255;
if(G1 < 0) G1 = 0;
if(G1 > 255) G1 = 255;
if(B1 < 0) B1 = 0;
if(B1 > 255) B1 = 255;
*outyuv++ = B1;
*outyuv++ = G1;
*outyuv++ = R1;
}
}
else if(info->format == DECODED_FORMAT_RG48)
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
G1 = (*G++);
R1 = (*RG++);
B1 = (*BG++);
*rgb16++ = R1;
*rgb16++ = G1;
*rgb16++ = B1;
}
}
line += pitch;
source_line += src_pitch;
}
}
//MEMORY_ALIGNED_FREE(RawBayer16);
}
}
else // Output the frame in one of the RGB 8-bit formats
{
//char *buffer = decoder->buffer;
//size_t buffer_size = decoder->buffer_size;
// Invert the bottom wavelet and convert the output to the requested color format
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sYUVtoRGB);
#else
TransformInverseSpatialToBuffer(decoder, transform_array, frame, num_channels, output, pitch,
&info2, &decoder->scratch, chroma_offset, precision);
#endif
}
}
}
#if TIMING
// Count the number of progressive frames that were decoded
progressive_decode_count++;
#endif
}
STOP(tk_inverse);
#ifdef ADOBE_MEMORY_FUNCTIONS
if((decoder->RawBayer16 && decoder->RawBayerSize > 2048*1152*2) ||
(decoder->RGBFilterBuffer16 && decoder->RGBFilterBufferSize > 2048*1152*2))
{
#if _ALLOCATOR
if(decoder->RawBayer16)
{
FreeAligned(decoder->allocator, decoder->RawBayer16);
decoder->RawBayer16 = NULL;
decoder->RawBayerSize = NULL;
}
if(decoder->RGBFilterBuffer16)
{
FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
decoder->RGBFilterBuffer16 = NULL;
decoder->RGBFilterBufferSize = NULL;
}
#else
if(decoder->RawBayer16)
{
MEMORY_ALIGNED_FREE(decoder->RawBayer16);
decoder->RawBayer16 = NULL;
decoder->RawBayerSize = NULL;
}
if(decoder->RGBFilterBuffer16)
{
MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
decoder->RGBFilterBuffer16 = NULL;
decoder->RGBFilterBufferSize = NULL;
}
#endif
}
#endif
#if (0 && DEBUG)
if (logfile) {
//uint8_t *subimage = output;
uint8_t *subimage = output + (2 * info->width) - 16;
DumpArray8u("YUV Image", subimage, 16, 16, pitch, logfile);
}
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Exit ReconstructFrameToBuffer\n");
}
#endif
#if (0 && DEBUG && _WIN32)
_CrtCheckMemory();
#endif
}
#if 0
// Reconstruct the frame to quarter resolution at full frame rate
// NOTE(review): Legacy version of ReconstructQuarterFrame, compiled out by the
// enclosing #if 0 and kept for reference only.  It does not compile as
// written: it reads `output` and writes `channel_row_ptr`, neither of which is
// declared in this scope (the parameters are frame1/frame2 plus a raw
// caller-supplied buffer).  The active replacement is in the #else branch.
//
// Visible intent: for each output row, invert the temporal transform at
// quarter resolution for every channel into per-channel 16-bit scratch rows,
// then pack those rows into a single packed 8-bit output row.
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
	uint8_t *frame1, uint8_t *frame2, int output_pitch,
	FRAME_INFO *info, char *buffer, size_t buffer_size)
{
	TRANSFORM **transform_array = decoder->transform;
	int output_width = info->width;
	int output_height = info->height;
	// Per-channel row cursors into the temporal lowpass/highpass bands
	PIXEL *low_row_ptr[CODEC_MAX_CHANNELS];
	PIXEL *high_row_ptr[CODEC_MAX_CHANNELS];
	PIXEL *out1_row_ptr[CODEC_MAX_CHANNELS];
	PIXEL *out2_row_ptr[CODEC_MAX_CHANNELS];
	// Carves per-channel scratch rows out of the caller-supplied buffer
	PIXEL *bufptr = (PIXEL *)buffer;
	uint8_t *output_row_ptr = output;	// BUG: `output` is not declared in this scope
	int low_pitch[CODEC_MAX_CHANNELS];
	int high_pitch[CODEC_MAX_CHANNELS];
	int channel;
	int row;
	// Check that there is enough space for the intermediate results from each channel
	assert(output_width * sizeof(PIXEL) < buffer_size);
	// Get pointers into the wavelets for each channel
	for (channel = 0; channel < num_channels; channel++)
	{
		// Get the lowpass bands from the two wavelets for the two halves of the temporal wavelet
		IMAGE *low_wavelet = transform_array[channel]->wavelet[3];
		IMAGE *high_wavelet = transform_array[channel]->wavelet[2];
		// Get the pointers to the first row in each lowpass band
		low_row_ptr[channel] = low_wavelet->band[0];
		high_row_ptr[channel] = high_wavelet->band[0];
		low_pitch[channel] = low_wavelet->pitch / sizeof(PIXEL);
		high_pitch[channel] = high_wavelet->pitch / sizeof(PIXEL);
		// Allocate space for one row of results for this channel
		channel_row_ptr[channel] = bufptr;	// BUG: `channel_row_ptr` is not declared in this scope
		bufptr += low_wavelet->width;
	}
	for (row = 0; row < output_height; row++)
	{
		char *bufptr = buffer;	// shadows the outer bufptr; never read below
		for (channel = 0; channel < num_channels; channel++)
		{
			// Invert the temporal transform at quarter resolution
			InvertTemporalQuarterRow16s(low_row_ptr[channel], high_row_ptr[channel], channel_row_ptr[channel]);
			// Advance to the next row in each band for the temporal transform
			low_row_ptr[channel] += low_pitch[channel];
			high_row_ptr[channel] += high_pitch[channel];
		}
		// Pack the intermediate results into the output row
		ConvertUnpacked16sRowToPacked8u(channel_row_ptr, num_channels, output_row_ptr, output_width);
		// Advance the output row pointer
		output_row_ptr += output_pitch;
	}
}
#else
// Reconstruct the frame to quarter resolution at full frame rate
// Reconstruct one frame of a quarter-resolution decode at the full frame rate.
//
// decoder      - decoder state (wavelet transforms, metadata flags, logfile)
// num_channels - number of color channels to reconstruct
// frame_index  - 0 selects the even temporal row, 1 the odd temporal row,
//                of the frame pair produced by the inverse temporal transform
// output       - destination frame buffer
// output_pitch - bytes per output row (negated below for bottom-up RGB formats)
// info         - output dimensions, color format, and color space
// scratch      - scratch space; one 16-byte-aligned row per channel is carved
//                out of scratch->free_ptr for the intermediate results
// precision    - encoded precision in bits (12 is remapped to 8 below)
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
	int frame_index, uint8_t *output, int output_pitch,
	FRAME_INFO *info, const SCRATCH *scratch, int precision)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	TRANSFORM **transform_array = decoder->transform;
	int output_width = info->width;
	int output_height = info->height;
	// Per-channel row cursors into the temporal lowpass bands
	PIXEL *low_row_ptr[CODEC_MAX_CHANNELS];
	PIXEL *high_row_ptr[CODEC_MAX_CHANNELS];
	uint8_t *output_row_ptr = output;
	int low_pitch[CODEC_MAX_CHANNELS];
	int high_pitch[CODEC_MAX_CHANNELS];
	int channel;
	int row;
	// Value used for filling the fourth channel in ARGB output
	int alpha = 255;
	int format = COLORFORMAT(info);
	int color_space = COLORSPACE(info);
	int decoded_format = DECODEDFORMAT(info);
	//bool inverted = false;
	// The pixels are descaled in the inverse temporal transform
	//const int descale = 0;
	// Shift the intermediate results to 16-bit pixels
	const int shift_yu64 = 8;
	// Push the scratch space state to allocate a new section
	char *buffer = scratch->free_ptr;
#if DEBUG
	size_t buffer_size = scratch->free_size;
#endif
	// Initialize a pointer for allocating space in the buffer
	PIXEL *bufptr = (PIXEL *)buffer;
	// Array of pointers to the start of each channel in the intermediate results
	PIXEL *channel_row_ptr[CODEC_MAX_CHANNELS];
	// Check that there is enough space for the intermediate results from each channel
#if DEBUG
	assert(output_width * sizeof(PIXEL) < buffer_size);
#endif
	ComputeCube(decoder);
	// Get pointers into the wavelets for each channel
	for (channel = 0; channel < num_channels; channel++)
	{
		// Get the lowpass bands from the two wavelets for the two halves of the temporal wavelet
		// (wavelet[4] feeds the even output row, wavelet[3] the odd output row)
		IMAGE *low_wavelet = transform_array[channel]->wavelet[4];
		IMAGE *high_wavelet = transform_array[channel]->wavelet[3];
		// Get the pointers to the first row in each lowpass band
		low_row_ptr[channel] = low_wavelet->band[0];
		high_row_ptr[channel] = high_wavelet->band[0];
		low_pitch[channel] = low_wavelet->pitch / sizeof(PIXEL);
		high_pitch[channel] = high_wavelet->pitch / sizeof(PIXEL);
		// Force the row of intermediate results to be properly aligned
		// (required by the aligned _mm_load_si128/_mm_store_si128 below)
		bufptr = (PIXEL *)ALIGN16(bufptr);
		// Allocate space for one row of results for this channel
		channel_row_ptr[channel] = bufptr;
		bufptr += low_wavelet->width;
		// Check that the row of intermediate results is properly aligned
		assert(ISALIGNED16(channel_row_ptr[channel]));
	}
	// Invert the image if required: RGB24/RGB32 are written bottom-up, so
	// start at the last row and walk upward with a negated pitch.
	// (Fall-through is intentional; both cases share the same statements.)
	switch (decoded_format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB32:
		output_row_ptr += (output_height - 1) * output_pitch;
		output_pitch = NEG(output_pitch);
	}
	//HACK: Seems to work, I don't know why. //DAN20070304
	if (precision == 12) precision = 8;
	// Apply the inverse temporal transform to the lowpass and highpass rows
	for (row = 0; row < output_height; row++)
	{
		// Most of the color conversion routines use zero descaling
		int descale = 0;
		//char *bufptr = buffer;
		for (channel = 0; channel < num_channels; channel++)
		{
			if (frame_index == 0)
			{
				// Invert the temporal transform at quarter resolution to get the even row
				InvertTemporalQuarterEvenRow16s(low_row_ptr[channel], high_row_ptr[channel],
					channel_row_ptr[channel], output_width, precision);
			}
			else
			{
				assert(frame_index == 1);
				// Invert the temporal transform at quarter resolution to get the odd row
				InvertTemporalQuarterOddRow16s(low_row_ptr[channel], high_row_ptr[channel],
					channel_row_ptr[channel], output_width, precision);
			}
			// Advance to the next row in each band for the temporal transform
			low_row_ptr[channel] += low_pitch[channel];
			high_row_ptr[channel] += high_pitch[channel];
		}
		if(decoder->use_active_metadata_decoder)
		{
			// Active-metadata path: pre-scale the intermediate rows, then hand
			// one-line "frames" to the universal row converter.
			uint8_t *channeldata[TRANSFORM_MAX_CHANNELS]; // used in quarter res decodes
			int channelpitch[TRANSFORM_MAX_CHANNELS]; // used in quarter res decodes
			int i;
			FRAME_INFO info2;
			memcpy(&info2, info, sizeof(FRAME_INFO));
			info2.height = 1;	// process a single row at a time
			for(i=0;i<num_channels;i++)
			{
				channeldata[i] = (uint8_t *)channel_row_ptr[i];
				channelpitch[i] = 0;
			}
#if 1
			{
				// Clamp each 16-bit sample to the 12-bit range [0, 0x0fff] via
				// saturating add/subtract of (0x7fff - 0x0fff), then shift left
				// 4 bits to scale up to a 16-bit white point.  The first plane
				// carries info->width samples; the other two planes carry half
				// as many (2:1 horizontal chroma subsampling).
				// NOTE(review): the loops advance 8 samples per iteration, so a
				// width that is not a multiple of 8 touches up to 7 samples of
				// slack past the row -- presumably covered by the per-channel
				// scratch allocation above; confirm.
				__m128i *Y = (__m128i *)channeldata[0];
				__m128i *U = (__m128i *)channeldata[1];
				__m128i *V = (__m128i *)channeldata[2];
				__m128i v;
				int x;
				__m128i rgb_limit_epi16 = _mm_set1_epi16(0x7fff - 0x0fff);
				for(x=0;x<info->width;x+=8)
				{
					v = _mm_load_si128(Y);
					v = _mm_adds_epi16(v, rgb_limit_epi16);
					v = _mm_subs_epu16(v, rgb_limit_epi16);
					v = _mm_slli_epi16(v, 4);
					_mm_store_si128(Y++, v);
				}
				for(x=0;x<info->width/2;x+=8)
				{
					v = _mm_load_si128(U);
					v = _mm_adds_epi16(v, rgb_limit_epi16);
					v = _mm_subs_epu16(v, rgb_limit_epi16);
					v = _mm_slli_epi16(v, 4);
					_mm_store_si128(U++, v);
				}
				for(x=0;x<info->width/2;x+=8)
				{
					v = _mm_load_si128(V);
					v = _mm_adds_epi16(v, rgb_limit_epi16);
					v = _mm_subs_epu16(v, rgb_limit_epi16);
					v = _mm_slli_epi16(v, 4);
					_mm_store_si128(V++, v);
				}
			}
#else
			//non SSE2
			// NOTE(review): this fallback does not compile -- `x`, `gptr`,
			// `src`, and `scanline2` are not declared in this scope; dead code
			// kept for reference only.
			for(x=0;x<info->width*2;x++)
			{
				int val = *gptr++;
				if(val < 0) val = 0;
				if(val > 4095) val = 4095;
				val <<= 4;
				*src++ = val;
			}
			src = scanline2;
#endif
			Row16uQuarter2OutputFormat(decoder, &info2, 0, output_row_ptr, output_pitch,
				decoder->gop_frame_num/*0 frame*/, scratch->free_ptr, scratch->free_size, false, channeldata, channelpitch);
		}
		else
		{
			//DAN20081203 -- fix for 444 decodes in AE32-bit float
			decoder->frame.white_point = 16;
			//decoder->frame.signed_pixels = 0;
			// Convert the rows of luma and chroma into the output format.
			// RGB_444/RGBA_4444 sources use the RGB48 converters (note the
			// channel order: channel 1, 0, 2); other sources are YUV.
			switch(format)
			{
			case COLOR_FORMAT_YUYV:
			case COLOR_FORMAT_UYVY:
				// Pack the intermediate results into the output row
				if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
				{
					assert(0);//need quarter res BAYER To YUV decoder
				}
				else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
					(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
				{
					// assert(0);//need quarter res RGB To YUV decoder
					ConvertRGB2YUV(	channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2],
							output_width, output_width, output_width,
							output_row_ptr, output_pitch,
							info->width, 1, 10, info->colorspace, format);
				}
				else
				{
					ConvertUnpacked16sRowToPacked8u(channel_row_ptr, num_channels, output_row_ptr, output_width, format);
				}
				break;
			case COLOR_FORMAT_RGB24:
				if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
					(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
				{
					ConvertRGB48toRGB24( channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2],
							output_width, output_width, output_width,
							output_row_ptr, output_pitch,
							info->width, 1, 10, 0);
				}
				else
				{
					// Convert the intermediate results into a row of RGB24
					ConvertUnpacked16sRowToRGB24(channel_row_ptr, num_channels, output_row_ptr, output_width,
						descale, format, color_space);
				}
				break;
			case COLOR_FORMAT_RGB32:
				if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
					(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
				{
					ConvertRGBA48toRGB32(channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2], NULL,
							output_width,
							output_row_ptr, output_pitch,
							info->width, 1, 10, 0, 3/*only 3 channels, not 4 for alpha*/);
				}
				else
				{
					// Convert the intermediate results into a row of RGBA32
					ConvertUnpacked16sRowToRGB32(channel_row_ptr, num_channels, output_row_ptr, output_width,
						descale, format, color_space, alpha);
				}
				break;
			case COLOR_FORMAT_YU64:
			case COLOR_FORMAT_V210:
				// Convert the intermediate results into a row of YU64
				ConvertUnpacked16sRowToYU64(channel_row_ptr, num_channels, output_row_ptr, output_width,
					shift_yu64, precision, format);
				break;
			case COLOR_FORMAT_B64A:
				if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
					(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
				{
					// Convert the intermediate results into a row of RGBA with 16 bits per component
					descale = 2;
					ConvertUnpacked16sRowToB64A(channel_row_ptr, num_channels, output_row_ptr, output_width,
						descale, precision);
				}
				else
				{
					ConvertUnpackedYUV16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr, output_width,
						descale, precision, COLOR_FORMAT_B64A, color_space);
				}
				break;
			case COLOR_FORMAT_R210:
			case COLOR_FORMAT_DPX0:
			case COLOR_FORMAT_RG30:
			case COLOR_FORMAT_AR10:
			case COLOR_FORMAT_AB10:
				if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
					(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
				{
					// Convert the intermediate results into a row of RGBA with 16 bits per component
					descale = 2;
					ConvertUnpacked16sRowToRGB30(channel_row_ptr, num_channels, output_row_ptr, output_width,
						descale, precision, format, color_space);
				}
				else
				{
					ConvertUnpackedYUV16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr, output_width,
						descale, precision, format, color_space);
				}
				break;
			case COLOR_FORMAT_RG48:
				// Convert the intermediate results into a row of RGBA with 16 bits per component
				descale = 2;
				ConvertUnpacked16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr, output_width,
					descale, precision);
				break;
			case COLOR_FORMAT_RG64:
				// Convert the intermediate results into a row of RGBA with 16 bits per component
				descale = 2;
				ConvertUnpacked16sRowToRGBA64(channel_row_ptr, num_channels, output_row_ptr, output_width,
					descale, precision);
				break;
			default:
#if (1 && DEBUG)
				if (logfile) {
					fprintf(logfile, "ReconstructQuarterFrame bad color format: %d\n", format);
				}
#endif
				assert(0);
				break;
			}
		}
		// Advance the output row pointer
		output_row_ptr += output_pitch;
	}
}
#endif
#if 0
// Copy the quarter resolution lowpass channels from the spatial transform
// NOTE: This routine is compiled out; ConvertQuarterFrameToBuffer below is the
// active implementation, which also performs output pixel format conversion.
void CopyQuarterFrameToBuffer(TRANSFORM **transform_array, int num_channels,
                              uint8_t *output, int output_pitch,
                              FRAME_INFO *info, int precision)
{
    int output_width = info->width;
    int output_height = info->height;
    PIXEL *input_row_ptr[CODEC_MAX_CHANNELS];   // Current input row for each channel
    uint8_t *output_row_ptr = output;           // Current output row
    int input_pitch[CODEC_MAX_CHANNELS];        // Input pitch per channel, in pixels
    int channel;
    int row;

    // Get pointers into the wavelets for each channel
    for (channel = 0; channel < num_channels; channel++)
    {
        // Get the lowpass bands from the two wavelets for the two halves of the temporal wavelet
        IMAGE *wavelet = transform_array[channel]->wavelet[1];

        // Get the pointers to the first row in each lowpass band
        input_row_ptr[channel] = wavelet->band[0];
        // Convert the wavelet pitch from bytes to pixels
        input_pitch[channel] = wavelet->pitch / sizeof(PIXEL);
    }

    for (row = 0; row < output_height; row++)
    {
        // Descale and pack the pixels in each output row
        CopyQuarterRowToBuffer(input_row_ptr, num_channels, output_row_ptr, output_width, precision);

        // Advance the input row pointers
        for (channel = 0; channel < num_channels; channel++) {
            input_row_ptr[channel] += input_pitch[channel];
        }

        // Advance the output row pointer
        output_row_ptr += output_pitch;
    }
}
#endif
// Convert the quarter resolution lowpass channels to the specified output format
//
// Reads the lowpass band of wavelet index 1 from each channel's transform and
// packs it row by row into the output buffer in the format given by the frame
// info.  For certain format combinations the conversion is routed through the
// threaded active-metadata decoder instead of the per-row switch below.
void ConvertQuarterFrameToBuffer(DECODER *decoder, TRANSFORM **transform_array, int num_channels,
                                 uint8_t *output, int output_pitch,
                                 FRAME_INFO *info, int precision)
{
    int output_width = info->width;
    int output_height = info->height;
    PIXEL *input_row_ptr[CODEC_MAX_CHANNELS];   // Current input row for each channel
    uint8_t *output_row_ptr = output;           // Current output row
    int input_pitch[CODEC_MAX_CHANNELS];        // Input pitch per channel, in pixels
    int channel;
    int row;

    // Value used for filling the fourth channel in ARGB output
    int alpha = 255;

    int format = COLORFORMAT(info);
    int color_space = COLORSPACE(info);
    int decoded_format = DECODEDFORMAT(info);
    //bool inverted = false;

    // Get pointers into the wavelets for each channel
    for (channel = 0; channel < num_channels; channel++)
    {
        // Get the lowpass bands from the wavelets with quarter resolution
        const int wavelet_index = 1;
        IMAGE *wavelet = transform_array[channel]->wavelet[wavelet_index];

        // The wavelet should have been reconstructed
        assert(wavelet != NULL);

        // The lowpass band should be valid
        assert((wavelet->band_valid_flags & BAND_VALID_MASK(0)) != 0);

        // Get the pointers to the first row in each lowpass band
        input_row_ptr[channel] = wavelet->band[0];
        // Convert the wavelet pitch from bytes to pixels
        input_pitch[channel] = wavelet->pitch / sizeof(PIXEL);
    }

    // Invert the image if required: for bottom-up RGB formats start on the
    // last row and negate the pitch so rows are written upward.
    // (The two cases share one body; there is deliberately no default.)
    switch (decoded_format)
    {
    case DECODED_FORMAT_RGB24:
    case DECODED_FORMAT_RGB32:
        output_row_ptr += (output_height - 1) * output_pitch;
        output_pitch = NEG(output_pitch);
    }

    ComputeCube(decoder);

    //HACK DAN20110122 -- some formats will not directly decode so need to use the AM route
    {
        // RGB-encoded sources cannot be converted directly to these YUV output
        // formats here, so force the active-metadata (threaded) decode path.
        if( format == COLOR_FORMAT_YU64 ||
            format == COLOR_FORMAT_V210 ||
            format == COLOR_FORMAT_R408 ||
            format == COLOR_FORMAT_V408)
        {
            if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
                (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
            {
                decoder->use_active_metadata_decoder = true;
                decoder->apply_color_active_metadata = true;
            }
        }
    }

    if(decoder->use_active_metadata_decoder)
    {
#if _THREADED
        {
            WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
            // Lazily create the worker thread pool on first use
            if(decoder->worker_thread.pool.thread_count == 0)
            {
                CreateLock(&decoder->worker_thread.lock);
                // Initialize the pool of transform worker threads
                ThreadPoolCreate(&decoder->worker_thread.pool,
                                 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                                 WorkerThreadProc,
                                 decoder);
            }
#endif
            // Post a message to the mailbox
            mailbox->output = output_row_ptr;
            mailbox->pitch = output_pitch;
            mailbox->framenum = 0;
            for(channel = 0; channel < num_channels; channel++)
            {
                mailbox->channeldata[channel] = (uint8_t *)input_row_ptr[channel];
                // Channel pitch is passed to the workers in bytes
                mailbox->channelpitch[channel] = input_pitch[channel]*sizeof(PIXEL);
            }
            memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
            mailbox->jobType = JOB_TYPE_OUTPUT;

            decoder->RGBFilterBufferPhase = 1;

            // Set the work count to the number of rows to process
            ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);

            // Start the transform worker threads
            ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

            // Wait for all of the worker threads to finish
            ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

            decoder->RGBFilterBufferPhase = 0;
        }
#endif
    }
    else
    {
        //DAN20081203 -- fix for 444 decodes in AE32-bit float
        decoder->frame.white_point = 16;
        //decoder->frame.signed_pixels = 0;

        // Convert each row to the specified output format
        for (row = 0; row < output_height; row++)
        {
            // Right shift for converting lowpass coefficients to pixels
            int descale = 4;

            // Mask off the sign bit of the format code before dispatching
            // (presumably the high bit flags an inverted variant -- TODO confirm)
            switch(format & 0x7fffffff)
            {
            case COLOR_FORMAT_YUYV:
            case COLOR_FORMAT_UYVY:
                if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
                    (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
                {
                    // assert(0);//need quarter res RGB To YUV decoder
                    // Note the channel order: channel 0 is G, so R/G/B rows are 1/0/2
                    ConvertRGB2YUV( input_row_ptr[1], input_row_ptr[0], input_row_ptr[2],
                                    output_width, output_width, output_width,
                                    output_row_ptr, output_pitch,
                                    info->width, 1, 14, info->colorspace, format);
                }
                else
                {
                    // Descale and pack the pixels in each output row
                    CopyQuarterRowToBuffer(input_row_ptr, num_channels, output_row_ptr, output_width,
                                           precision, format);
                }
                break;

            case COLOR_FORMAT_RGB24:
                if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
                    (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
                {
                    ConvertRGB48toRGB24(input_row_ptr[1], input_row_ptr[0], input_row_ptr[2],
                                        output_width, output_width, output_width,
                                        output_row_ptr, output_pitch,
                                        info->width, 1, 14, 0);
                }
                else
                {
                    // Convert the intermediate results into a row of RGB24
                    ConvertUnpacked16sRowToRGB24(input_row_ptr, num_channels, output_row_ptr, output_width, descale, format, color_space);
                }
                break;

            case COLOR_FORMAT_RGB32:
            case COLOR_FORMAT_RGB32_INVERTED:
                if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
                    (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
                {
                    // NOTE(review): input_row_ptr[3] is only initialized when
                    // num_channels >= 4; presumably the callee ignores it for
                    // three-channel input -- TODO confirm
                    ConvertRGBA48toRGB32( input_row_ptr[1], input_row_ptr[0], input_row_ptr[2], input_row_ptr[3],
                                          output_width,
                                          output_row_ptr, output_pitch,
                                          info->width, 1, 14, 0, num_channels);
                }
                else
                {
                    // Convert the intermediate results into a row of RGBA32
                    ConvertUnpacked16sRowToRGB32(input_row_ptr, num_channels, output_row_ptr, output_width,
                                                 descale, format, color_space, alpha);
                }
                break;

            case COLOR_FORMAT_YU64:
            case COLOR_FORMAT_V210:
                if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
                    (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
                {
                    //TODO RGB to YUV Quarter RES DAN20110120 - handle above with HACK DAN20110122
                    // (This combination is redirected to the threaded path above,
                    // so this branch is intentionally empty.)
                }
                else
                {
                    // Convert the intermediate results into a row of YU64
                    ConvertUnpacked16sRowToYU64(input_row_ptr, num_channels, output_row_ptr, output_width,
                                                descale, precision, format);
                }
                break;

            case COLOR_FORMAT_B64A:
                // Convert the intermediate results to a row of ARGB with 16 bits per pixel
                descale = 2;
                ConvertUnpacked16sRowToB64A(input_row_ptr, num_channels, output_row_ptr, output_width,
                                            descale, precision);
                break;

            case COLOR_FORMAT_R210:
            case COLOR_FORMAT_DPX0:
            case COLOR_FORMAT_RG30:
            case COLOR_FORMAT_AR10:
            case COLOR_FORMAT_AB10:
                // Convert the intermediate results to a row of ARGB with 16 bits per pixel
                descale = 2;
                ConvertUnpacked16sRowToRGB30(input_row_ptr, num_channels, output_row_ptr, output_width,
                                             descale, precision, format, color_space);
                break;

            case COLOR_FORMAT_RG48:
                // Convert the intermediate results into a row of RGBA with 16 bits per component
                descale = 2;
                ConvertUnpacked16sRowToRGB48(input_row_ptr, num_channels, output_row_ptr, output_width,
                                             descale, precision);
                break;

            case COLOR_FORMAT_RG64:
                // Convert the intermediate results into a row of RGBA with 16 bits per component
                descale = 2;
                ConvertUnpacked16sRowToRGBA64(input_row_ptr, num_channels, output_row_ptr, output_width,
                                              descale, precision);
                break;

            default:
                // Unsupported output format
                assert(0);
                break;
            }

            // Advance the input row pointers
            for (channel = 0; channel < num_channels; channel++) {
                input_row_ptr[channel] += input_pitch[channel];
            }

            // Advance the output row pointer
            output_row_ptr += output_pitch;
        }
    }
}
// Release all resources allocated by the decoder.
// The transform array and count are part of the public signature but are not
// used here; all decoder-owned state is freed by ClearDecoder.
void DecodeRelease(DECODER *decoder, TRANSFORM *transform[], int num_transforms)
{
#if _TIMING && 0
    FILE *logfile = decoder->logfile;
    uint32_t frame_count = decoder->frame_count;

    // Fixed: removed a stray line-continuation backslash that spliced the
    // condition line onto the opening brace.
    if (logfile != NULL && frame_count > 0)
    {
#ifdef _WIN32
        PrintStatistics(logfile, frame_count, NULL, TIMING_CSV_FILENAME);
#else
        PrintStatistics(logfile, frame_count, NULL, NULL);
#endif
    }
#endif

    // Free the data structures allocated for decoding
    ClearDecoder(decoder);
}
// Request that the decoder (and its parallel companion, if present) reload
// all active metadata on the next decode.
void DecodeForceMetadataRefresh(DECODER *decoder)
{
    decoder->cfhddata.force_metadata_refresh = true;

    // Propagate the refresh request to the parallel decoder when one exists
    if (decoder->parallelDecoder != NULL) {
        decoder->parallelDecoder->cfhddata.force_metadata_refresh = true;
    }
}
// Store the decoder control flags.
void SetDecoderFlags(DECODER *decoder, uint32_t flags)
{
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif

    // Set the decoder flags
    decoder->flags = flags;

#if (0 && DEBUG)
    if (logfile) {
        // Fixed: flags is a uint32_t, not a pointer, so passing it to %p was
        // undefined behavior (and would have printed a doubled "0x" prefix).
        fprintf(logfile, "Decoder flags: 0x%08x\n", decoder->flags);
    }
#endif
}
// Set the dimensions, pixel format, and decoding resolution for output frames.
void SetDecoderFormat(DECODER *decoder, int width, int height, int format, int resolution)
{
    // Need to modify the codec to use the decoding format
    decoder->frame.width = width;
    decoder->frame.height = height;

    // Both fields receive the requested format in every case
    decoder->frame.output_format = format;
    decoder->frame.format = format;

    // The 13-bit white point formats (WP13 and W13A) decode natively;
    // all other formats use a 16-bit white point.
    if (format == DECODED_FORMAT_WP13 || format == DECODED_FORMAT_W13A)
    {
        //decoder->frame.signed_pixels = 1;
        decoder->frame.white_point = 13;
    }
    else
    {
        //decoder->frame.signed_pixels = 0;
        decoder->frame.white_point = 16;
    }

    decoder->frame.resolution = resolution;
    decoder->frame.pixel_size = PixelSize(decoder->frame.format);
}
// Determine the CPU features and usable processor count and pack them into
// the thread control capabilities word (features in the low bits, processor
// count in the high 16 bits).
void SetDecoderCapabilities(DECODER *decoder)
{
    int processor_count;

#ifdef _WIN32
    int limit_cpus = 32;
#else
    int limit_cpus = 32; // AJA spins off too many
#endif

    // Set the capabilities that are most likely supported by the Intel Mac
    decoder->thread_cntrl.capabilities = (_CPU_FEATURE_MMX | _CPU_FEATURE_SSE | _CPU_FEATURE_SSE2);

    if (decoder->thread_cntrl.limit)
    {
        // An explicit thread limit overrides everything else
        limit_cpus = decoder->thread_cntrl.limit;
    }
    else if (decoder->thread_cntrl.affinity)
    {
        // Count the processors enabled in the affinity mask
        int i;
        const int max_cpu_count = 32;

        limit_cpus = 0;

        for (i = 0; i < max_cpu_count; i++)
        {
            // Fixed: use an unsigned shift; (1 << 31) overflows a signed int,
            // which is undefined behavior in C.
            if (decoder->thread_cntrl.affinity & (1u << i)) {
                limit_cpus++;
            }
        }
    }

    // Set the number of processors
    processor_count = GetProcessorCount();
    if (processor_count > limit_cpus)
        processor_count = limit_cpus;

#if (0 && DEBUG)
    // Set the number of processors (for debugging)
    //processor_count = 8;
    processor_count = 1;
    fprintf(stderr, "Limit processors to %d\n", processor_count);
#endif

    // Pack the processor count into the high 16 bits of the capabilities word
    decoder->thread_cntrl.capabilities |= (processor_count << 16);
}
// Return the capabilities word (CPU features plus processor count).
int GetDecoderCapabilities(DECODER *decoder)
{
    const int capabilities = decoder->thread_cntrl.capabilities;
    return capabilities;
}
// Set the decoded color space flags; returns false if the flags are out of range.
// (The lower-bound check against MIN_DECODED_COLOR_SPACE is intentionally
// disabled, matching the original implementation.)
bool SetDecoderColorFlags(DECODER *decoder, uint32_t color_flags)
{
    // Reject color flags above the supported maximum
    if (color_flags > MAX_DECODED_COLOR_SPACE) {
        return false;
    }

    decoder->frame.colorspace = color_flags;

    // The color flags were set as specified
    return true;
}
// Compute the resolution corresponding to the specified combination of input
// and output dimensions: full, half, or quarter resolution, or unsupported.
int DecodedResolution(int input_width, int input_height, int output_width, int output_height)
{
    // Output height can be negative for inverted RGB
    output_height = abs(output_height);

    // Full resolution: output matches the input exactly
    if (output_width == input_width && output_height == input_height)
        return DECODED_RESOLUTION_FULL;

    // Half resolution: both dimensions halved
    if (output_width == input_width / 2 && output_height == input_height / 2)
        return DECODED_RESOLUTION_HALF;

    // Quarter resolution: both dimensions quartered
    // ((w/2)/2 == w/4 with C truncating division, so this matches the
    // original successive-halving computation)
    if (output_width == input_width / 4 && output_height == input_height / 4)
        return DECODED_RESOLUTION_QUARTER;

    return DECODED_RESOLUTION_UNSUPPORTED;
}
// Compute the decoded resolution that is closest to the output dimensions
// (the smallest of full/half/quarter resolution that is still at least as
// large as the requested output).
int DecodedScale(int input_width, int input_height, int output_width, int output_height)
{
    int decoded_width = input_width;
    int decoded_height = input_height;

    // Maps the number of halvings performed to the resolution code
    static int decodedResolution[] =
    {
        DECODED_RESOLUTION_FULL,
        DECODED_RESOLUTION_HALF,
        DECODED_RESOLUTION_QUARTER
    };

    int reduction = 0;      // Number of times the dimensions were halved
    int max_reduction = 2;  // Cannot reduce below quarter resolution

    // Output height can be negative for inverted RGB
    output_height = abs(output_height);

#if 1
    // Always decode to the next larger size
    while (decoded_width > output_width &&
           decoded_height > output_height &&
           reduction < max_reduction)
    {
        // Decode to a frame size that is larger than the output image
        int reduced_width = decoded_width / 2;
        int reduced_height = decoded_height / 2;

        // Only halve if the result still covers the output dimensions
        if (reduced_width >= output_width && reduced_height >= output_height)
        {
            decoded_width = reduced_width;
            decoded_height = reduced_height;
            reduction++;
        }
        else
        {
            break;
        }
    }
#else
    // Disabled alternative: allow decoding to a smaller-than-output size
    // once the decoded size is within 25% of the output size
    while (decoded_width*4 > output_width*5 &&
           decoded_height*4 > output_height*5 &&
           reduction < max_reduction)
    {
#if 0
        // Decode to a frame size that is larger than the output image
        int reduced_width = decoded_width / 2;
        int reduced_height = decoded_height / 2;

        if (reduced_width >= output_width && reduced_height >= output_height)
        {
            decoded_width = reduced_width;
            decoded_height = reduced_height;
            reduction++;
        }
        else
        {
            break;
        }
#else
        // Better to scale up a smaller image than scale down a larger image
        decoded_width /= 2;
        decoded_height /= 2;
        reduction++;
#endif
    }
#endif

    // Check that the decoded resolution is valid
    assert(0 <= reduction && reduction <= max_reduction);

    return decodedResolution[reduction];
}
// Compute the decoded frame dimensions for the given encoded dimensions and
// decoded resolution, writing the results through the output pointers.
void ComputeDecodedDimensions(int encoded_width, int encoded_height, int decoded_resolution,
                              int *decoded_width_out, int *decoded_height_out)
{
    // Divisor applied to both encoded dimensions
    int divisor = 1;

    if (decoded_resolution == DECODED_RESOLUTION_HALF) {
        divisor = 2;
    }
    else if (decoded_resolution == DECODED_RESOLUTION_QUARTER) {
        divisor = 4;
    }
    else if (decoded_resolution == DECODED_RESOLUTION_LOWPASS_ONLY) {
        //TODO: Check that the lowpass dimensions are correct
        divisor = 8;
    }
    else {
        // Unknown resolutions assert and fall back to full resolution
        // (same behavior as the original switch default fallthrough)
        assert(decoded_resolution == DECODED_RESOLUTION_FULL);
        divisor = 1;
    }

    *decoded_width_out = encoded_width / divisor;
    *decoded_height_out = encoded_height / divisor;
}
// Return true if the specified resolution is supported
// (full, half, and quarter resolution decoding are supported).
bool IsDecodedResolution(int resolution)
{
    return (resolution == DECODED_RESOLUTION_FULL ||
            resolution == DECODED_RESOLUTION_HALF ||
            resolution == DECODED_RESOLUTION_QUARTER);
}
// Return true if the encoded sample is a key frame
//
// Scans up to the first twenty tag-value segments of the sample for the
// sample type tag and classifies the sample type.  Returns false if no
// sample type tag is found within the scanned segments.
bool IsSampleKeyFrame(uint8_t *sample, size_t size)
{
    bool key_frame_flag = false;

    // Search the first twenty tags for the sample type
    const int num_tags = 20;
    int i;

    BITSTREAM bitstream;
    InitBitstreamBuffer(&bitstream, sample, size, BITSTREAM_ACCESS_READ);

    for (i = 0; i < num_tags && size > 0; i++, size -= sizeof(TAGVALUE))
    {
        TAGVALUE segment = GetSegment(&bitstream);
        if (segment.tuple.tag == CODEC_TAG_SAMPLE)
        {
            // Note: the default label is deliberately placed in the middle of
            // this switch so it groups with the non-key-frame sample types.
            switch (segment.tuple.value)
            {
            // Sample types that begin a group of frames are key frames
            case SAMPLE_TYPE_GROUP:
            case SAMPLE_TYPE_FIRST:
            case SAMPLE_TYPE_IFRAME:
                key_frame_flag = true;
                break;

            // Dependent or header samples are not key frames
            case SAMPLE_TYPE_SEQUENCE_HEADER:
            case SAMPLE_TYPE_FRAME:
            case SAMPLE_TYPE_SECOND:
            case SAMPLE_TYPE_PFRAME:
            default:
                key_frame_flag = false;
                break;

            // Sample types that should never appear here
            case SAMPLE_TYPE_GROUP_TRAILER:
            case SAMPLE_TYPE_NONE:
            case SAMPLE_TYPE_ERROR:
            case SAMPLE_TYPE_CHANNEL:
                assert(0); // Unexpected situation
                key_frame_flag = false; // Report the sample as a non-key frame
                break;
            }

            break; // Found the sample type
        }
    }

    return key_frame_flag;
}
// Return the number of the most recently decoded frame (zero if the decoder
// pointer is null).
uint32_t DecodedFrameNumber(DECODER *decoder)
{
    // Fixed: check for a null decoder before touching any of its members;
    // the original computed &decoder->codec before the null check, which is
    // undefined behavior when decoder is NULL.
    if (decoder == NULL) return 0;

    return decoder->codec.frame_number;
}
/***** Start of the new code for the finite state machine (FSM) decoder *****/
#if _PROCESSOR_DISPATCH

// Dispatch stub for the Intel compiler's manual CPU dispatch feature: the
// compiler selects the Pentium_4 or Generic implementation below at runtime.
__declspec(cpu_dispatch(Pentium_4,Generic))
static inline void ZeroHighPassRow(PIXEL *rowptr, int length)
{
    // Stub routine for processor specific dispatch
}
#endif
#if _PROCESSOR_GENERIC

#if _PROCESSOR_DISPATCH
__declspec(cpu_specific(Generic))
#endif

// Zero one row of highpass coefficients using 8-byte MMX stores.
// This version assumes that the row is a multiple of 8 bytes
// (length is in bytes, not pixels).
static inline void ZeroHighPassRow(PIXEL *rowptr, int length)
{
    int count;

    // Check that the row starts on a 16-byte boundary
    //assert(ISALIGNED(rowptr, 16));

    // Check that the row length (in bytes) is a multiple of 8 byte blocks
    assert(ISALIGNED(length, 8));

    // Convert the length from pixels to 8-byte blocks
    count = (length >> 3);

    // This code assumes that at least one 8-byte block will be zeroed
    assert(count > 0);

    __asm
    {
        pxor mm0, mm0       // Zero a 16 byte register
        mov eax, rowptr     // Load the pointer to the memory block
        mov ebx, count      // Load the count of 8-byte blocks
 loop:  movq [eax], mm0     // Write 8 bytes of zeros
        add eax, 8          // Advance to the next 8 byte block
        sub ebx, 1          // Decrement the number of blocks
        jg loop
    }

    // NOTE(review): the EMMS instruction (_mm_empty) is commented out, so the
    // MMX state is not cleared here -- presumably handled by the caller; TODO confirm
    //_mm_empty();
}
#endif
#if _PROCESSOR_PENTIUM_4

#if _PROCESSOR_DISPATCH
__declspec(cpu_specific(Pentium_4))
#endif

#ifndef _WIN64
// Zero one row of highpass coefficients (Pentium 4 / SSE2 variant).
// This version assumes that the row is a multiple of 16 bytes
// (length is in bytes, not pixels).
static inline void ZeroHighPassRow(PIXEL *rowptr, int length)
{
    int count;

    // Check that the row starts on a 16-byte boundary
    assert(ISALIGNED(rowptr, 16));

    // Check that the row length (in bytes) is a multiple of 16 byte blocks
    assert(ISALIGNED(length, 16));

    // Convert the length from pixels to 16-byte blocks
    count = (length >> 4);

    // This code assumes that at least one 16-byte block will be zeroed
    assert(count > 0);

    // The inline assembly below is disabled; memset is used instead
#if 1 //DANREMOVE
    memset(rowptr, 0, length);
#else
    __asm
    {
        pxor xmm0, xmm0     // Zero a 16 byte register
        mov eax, rowptr     // Load the pointer to the memory block
        mov ebx, count      // Load the count of 16-byte blocks
 loop:  movdqa [eax], xmm0  // Write 16 bytes of zeros
        add eax, 16         // Advance to the next 16 byte block
        sub ebx, 1          // Decrement the number of blocks
        jg loop
    }
#endif
}
#else
// 64-bit build: inline assembly is not available, so use memset.
// This version assumes that the row is a multiple of 16 bytes
static inline void ZeroHighPassRow(PIXEL *rowptr, int length)
{
    // Check that the row starts on a 16-byte boundary
    assert(ISALIGNED(rowptr, 16));

    // Check that the row length (in bytes) is a multiple of 16 byte blocks
    assert(ISALIGNED(length, 16));

    memset(rowptr, 0, length);
}
#endif

#endif
#if (0 && _DEBUG)

// Functions for the finite state machine decoder (debug version).
// These are compiled out; the macro versions below are the active definitions.
static FSMENTRY *GetFSMTableEntry(FSM *fsm, int index)
{
    // Return the address of the next table entry in the finite state machine
    return &fsm->next_state[index];
}

static void ResetFSM(FSM *fsm)
{
    // Reset the state to the beginning of the finite state machine entries
    fsm->next_state = fsm->entries;
}

static void UpdateFSM(FSM *fsm, int next)
{
    // Change the state pointer to the next block of table entries
    fsm->next_state = fsm->entries + (next << FSM_INDEX_SIZE);
}

#else

// Macros for the finite state machine decoder
// (two variants: one table per state when _INDIVIDUAL_LUT is set,
// otherwise a single flat table indexed by state and input chunk)
#if _INDIVIDUAL_LUT
#define GetFSMTableEntry(fsm, index) (FSMENTRY *)fsm->next_state+index
#define ResetFSM(fsm) fsm->next_state = fsm->table.entries[0]
#define UpdateFSM(fsm, next) fsm->next_state = fsm->table.entries[next]
#define GetFSMTableEntryIndividual(fsm, index) (FSMENTRY *)fsm->table.entries_ind[(fsm->next_state_index << FSM_INDEX_SIZE) | index]
#define ResetFSMIndividual(fsm) fsm->next_state_index = 0
#define UpdateFSMIndividual(fsm, next) fsm->next_state_index = next
#else
#define GetFSMTableEntry(fsm, index) (FSMENTRY *)fsm->next_state+index
#define ResetFSM(fsm) fsm->next_state = fsm->table.entries
#define UpdateFSM(fsm, next) fsm->next_state = fsm->table.entries+((int)next << FSM_INDEX_SIZE)
#endif

#endif
#if _DEBUG

// Decode an FSM entry into its component fields for inspection in a debugger.
// The locals are intentionally unused; this routine produces no output.
static void DebugOutputFSMEntry(FSM *fsm, int index, FSMENTRY *entry)
{
    int pre_skip = (entry->pre_post_skip & 0xFFF);
    int post_skip = (entry->pre_post_skip >> 12);

    // Remove companding
    int value0 = entry->value0 / 32;
    int value1 = entry->value1 / 32;

    // Convert the index to start at the beginning of the table
    index += (int)(fsm->next_state - fsm->table.entries[0]);
}

// Same as above for the packed (fast) entry layout, where both values are
// packed into a single 32-bit field.
static void DebugOutputFSMEntryFast(FSM *fsm, int index, FSMENTRYFAST *entry)
{
    int pre_skip = (entry->pre_post_skip & 0xFFF);
    int post_skip = (entry->pre_post_skip >> 12);

    // Remove companding
    int value0 = (entry->values >> 16) / 32;
    int value1 = (entry->values & 0xFFFF) / 32;

    // Convert the index to start at the beginning of the table
    index += (int)(fsm->next_state - fsm->table.entries[0]);
}

// Walk the first block of FSM entries (debugger inspection only; no output).
static void DebugOutputFSM(FSM *fsm)
{
    int num_entries = FSM_INDEX_ENTRIES;
    int i;

    for (i = 0; i < num_entries; i++)
    {
        FSMENTRY *entry = &fsm->table.entries[0][i];
        int pre_skip = (entry->pre_post_skip & 0xFFF);
        int post_skip = (entry->pre_post_skip >> 12);
    }
}

// Print one FSM entry as a CSV line: index, values, and skip counts.
static void PrintFSMEntry(FSM *fsm, int index, FSMENTRY *entry, FILE *logfile)
{
    int pre_skip = (entry->pre_post_skip & 0xFFF);
    int post_skip = (entry->pre_post_skip >> 12);

    // Remove companding
    int value0 = entry->value0 / 32;
    int value1 = entry->value1 / 32;

    // Convert the index to start at the beginning of the table
    index += (int)(fsm->next_state - fsm->table.entries[0]);

    if (logfile) {
        fprintf(logfile, "%d, %d, %d, %d, %d\n", index, value0, value1, pre_skip, post_skip);
    }
}

// Print one packed (fast) FSM entry as a CSV line.
static void PrintFSMEntryFast(FSM *fsm, int index, FSMENTRYFAST *entry, FILE *logfile)
{
    int pre_skip = (entry->pre_post_skip & 0xFFF);
    int post_skip = (entry->pre_post_skip >> 12);

    // Remove companding
    int value0 = (entry->values >> 16) / 32;
    int value1 = (entry->values & 0xFFFF) / 32;

    // Convert the index to start at the beginning of the table
    index += (int)(fsm->next_state - fsm->table.entries[0]);

    if (logfile) {
        fprintf(logfile, "%d, %d, %d, %d, %d\n", index, value0, value1, pre_skip, post_skip);
    }
}

#endif
// Read the next byte from the bitstream, bypassing the bit-level buffering
// (inline of the third case of GetByte).
static inline int GetFastByte(BITSTREAM *stream)
{
    // Fetch the byte at the current position and advance the read pointer
    int byte = (uint32_t)(*(stream->lpCurrentWord));
    stream->lpCurrentWord++;

#if ERROR_TOLERANT
    // Update the count of bytes used
    stream->nWordsUsed--;
#endif

    // The result must fit in eight bits
    assert((byte & ~BITMASK(8)) == 0);

    return byte;
}
#if 0
// Read the next two bytes from the bitstream as a big-endian 16-bit value.
// Disabled: not referenced by the FSM decoder.
static inline int GetFastShort(BITSTREAM *stream)
{
    // Adaptation of the code in GetByte
    uint8_t *lpCurrentWord = stream->lpCurrentWord;

    // Get the next byte from the bitstream
    int byte = (uint32_t )(lpCurrentWord[0]);
    int word = (byte << 8) | (uint32_t )(lpCurrentWord[1]);

    // Update the state of the bitstream
    stream->lpCurrentWord = lpCurrentWord+2;

    // Check that the high bits are zero
    assert((word & ~BITMASK(16)) == 0);

    return word;
}
#endif
// Must declare the byte swap function even though it is an intrinsic
//int _bswap(int);
#if 0
// Read the next four bytes from the bitstream and byte swap from big-endian
// to native order.  Disabled: not referenced by the FSM decoder.
static inline int GetFastLong(BITSTREAM *stream)
{
    uint32_t *lpCurrentWord = (uint32_t *)stream->lpCurrentWord;
    int word = *(lpCurrentWord)++;

    // Swap from the big-endian bitstream order to native byte order
    //word = _bswap(word);
    word = SwapInt32BtoN(word);

    stream->lpCurrentWord = (uint8_t *)lpCurrentWord;

    return word;
}
#endif
#if 0 //DAN20041030 not used
// Decode a subband using FSM. One byte is read from the bitstream each time and decoded in two steps
// Original version that does not use a separate buffer for decoding
//
// Each byte is split into two 4-bit chunks; each chunk indexes the current
// FSM state table, yielding up to two dequantized coefficient values plus
// run-length skips of zero coefficients.  Rows are zeroed lazily as the scan
// advances, and the remainder of the band is zeroed when the band end
// trailer is decoded.  Returns true when the band end trailer is found.
bool DecodeBandFSM(FSM *fsm, BITSTREAM *stream, PIXEL *image, int width, int height, int pitch, int quantization)
{
    int index, byte;
    FSMENTRY *entry;
    PIXEL *rowptr = image;       // Current output row
    int column = 0;              // Current position within the row
    int32_t value;
    size_t bytes_row_size = width * sizeof(PIXEL);  // NOTE(review): unused
    PIXEL *maxptr;
    int length = width * sizeof(PIXEL);
    //ROI roi = {width, 1};

    // This version of Huffman decoder assumes that one byte
    // is processed as two 4-bit chunks
    assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
    assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

    // Convert the pitch to units of pixels
    pitch /= sizeof(PIXEL);

    // Compute the address of the row after the last row in the band
    maxptr = rowptr + height * pitch;

    // Round up the row length (in bytes) to a multiple of 16 bytes
    length = ALIGN16(length);

#if (0 && DEBUG)
    zerorow_count = 0;
#endif

    // Zero the first row before any coefficients are stored into it
    ZeroHighPassRow(rowptr, length);

    // Decode runs and magnitude values until the band end trailer is decoded
    for (;;)
    {
        // Read a byte from the bitstream
        byte = GetFastByte(stream);

        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

        // Return when the entire band is decoded
        if (entry->value0 == BAND_END_TRAILER) {
            // Zero out the whole subband from here on
            rowptr += pitch;
            while(rowptr < maxptr) {
                ZeroHighPassRow(rowptr, length);
                rowptr += pitch;
            }
            ResetFSM(fsm);
            return true;
        }

        // set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // If no magnitude value is decoded
        if (entry->value0 == 0) {
            column += entry->pre_skip;

            // The run length scan can go past the end of the row if the row ends
            // with a run of zeros and the next row begins with a run of zeros

            // Did the scan go beyond the end of the row?
            while (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;

                // Advance to the next row
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
            }
        }

        // If there is only one decoded magnitude value
        else if(entry->value1 == 0) {
            // Undo quantization and scaling
            value = quantization * entry->value0;

            column += entry->pre_skip;

            // The run length scan can go past the end of the row if the row ends
            // with a run of zeros and the next row begins with a run of zeros

            // Did the scan go beyond the end of the row?
            while (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;

                // Advance to the next row
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
            }

            // Fill in the decoded magnitude

            // Check the column before storing the value
            //assert(index < width);
            assert(0 <= column && column < width);

            // Store the saturated value at the position found in the scan
            rowptr[column] = SATURATE(value);

            column += entry->post_skip;

            // Did the scan go beyond the end of the row?
            if (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;

                // Advance to the next row
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
            }
        }

        // If there are two decoded magnitude values
        else
        {
            // Check the column before storing values
            assert(0 <= column && column < width);

            if(column < width-1) {
                // Both values fit in the current row
                value = quantization * entry->value0;
                rowptr[column++] = SATURATE(value);
                value = quantization * entry->value1;
                rowptr[column++] = SATURATE(value);
            }
            else {
                // The second value wraps to the start of the next row
                value = quantization * entry->value0;
                rowptr[column] = SATURATE(value);
                value = quantization * entry->value1;
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
                column = 0;
                rowptr[column++] = SATURATE(value);
            }
        }

        // decode the second 4-bit chunk
        index = byte & ((1<<FSM_INDEX_SIZE)-1);

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER) {
            // Zero out the whole subband from here on
            rowptr += pitch;
            while(rowptr < maxptr) {
                ZeroHighPassRow(rowptr, length);
                rowptr += pitch;
            }
            ResetFSM(fsm);
            return true;
        }

        // set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // If no magnitude value is decoded
        if (entry->value0 == 0) {
            column += entry->pre_skip;

            // The run length scan can go past the end of the row if the row ends
            // with a run of zeros and the next row begins with a run of zeros

            // Did the scan go beyond the end of the row?
            while (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;

                // Advance to the next row
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
            }
        }

        // If there is only one decoded magnitude value
        else if (entry->value1 == 0) {
            // Undo quantization and scaling
            int32_t value = quantization * entry->value0;

            column += entry->pre_skip;

            // The run length scan can go past the end of the row if the row ends
            // with a run of zeros and the next row begins with a run of zeros

            // Did the scan go beyond the end of the row?
            while (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;

                // Advance to the next row
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
            }

            // Fill in the decoded magnitude

            // Check the column before storing the value
            //assert(index < width);
            assert(0 <= column && column < width);

            // Store the saturated value at the position found in the scan
            rowptr[column] = SATURATE(value);

            column += entry->post_skip;

            // Did the scan go beyond the end of the row?
            if (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;

                // Advance to the next row
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
            }
        }

        // If there are two decoded magnitude values
        else
        {
            // Check the column before storing values
            assert(0 <= column && column < width);

            if(column < width-1) {
                // Both values fit in the current row
                value = quantization * entry->value0;
                rowptr[column++] = SATURATE(value);
                value = quantization * entry->value1;
                rowptr[column++] = SATURATE(value);
            }
            else {
                // The second value wraps to the start of the next row
                value = quantization * entry->value0;
                rowptr[column] = SATURATE(value);
                value = quantization * entry->value1;
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
                column = 0;
                rowptr[column++] = SATURATE(value);
            }
        }
    }
}
#endif
// Decode a subband of highpass coefficients using a finite state machine.
// One byte is read from the bitstream each time and decoded in two steps.
// New version that uses a buffer aligned to the cache for decoding.
#if 0
// Zero a block of whole cache lines using aligned SSE2 stores.
// Disabled: part of the unused buffered band decoder below.
static inline void ZeroHighPassBuffer(PIXEL *ptrCacheLines, int numCacheLines)
{
    // This routine assume that the cache line size is 64 bytes
    assert(_CACHE_LINE_SIZE == 64);

    // This routine assumes that the input pointer is aligned to a cache line
    assert(ISALIGNED(ptrCacheLines, _CACHE_LINE_SIZE));

    // This routine assumes that at least one cache line will be written
    assert(numCacheLines > 0);

#if __GNUC__
    // GCC builds use memset instead of the MSVC inline assembly
    memset(ptrCacheLines, 0, numCacheLines * _CACHE_LINE_SIZE);
#else
    __asm
    {
        pxor xmm0, xmm0          // Zero a 16 byte register
        mov eax, ptrCacheLines   // Load the pointer to the memory block
        mov ebx, numCacheLines   // Load the count of the number of cache lines
 loop:  movdqa [eax], xmm0       // Write 64 bytes of zeros using aligned stores
        movdqa [eax+16], xmm0
        movdqa [eax+32], xmm0
        movdqa [eax+48], xmm0
        add eax, 64              // Advance to the next cache line
        sub ebx, 1               // Decrement the number of cache lines
        jg loop
    }
#endif

    // The routine returns the pointer to the cache line after zeroing the block
}
#endif
#if 0
// Copy one decoded row from the staging buffer into the image.
//
// rowptr - destination row in the image (byte pointer)
// buffer - source staging buffer of decoded pixels
// length - number of BYTES to copy; must be a multiple of 16
//
// GCC builds use memcpy; MSVC builds copy 16 bytes per iteration using an
// aligned SSE2 load (movdqa) and a non-temporal store (movntdq) so the copy
// bypasses the cache. NOTE(review): the asm path assumes both pointers are
// 16-byte aligned (movdqa faults otherwise) — confirm against callers.
// Currently compiled out by the enclosing #if 0.
static inline void CopyRowBuffer(char *rowptr, PIXEL *buffer, int length)
{
// Note that the length is in units of bytes (not pixels)
int count; // Number of 16-byte blocks to copy
// Check that the row length is an integer multiple of 16-byte blocks
assert(ISALIGNED(length, 16));
// Convert the row length to the number of 16-byte blocks to copy
count = length >> 4;
// This routine assumes that at least one 16 byte block will be copied
assert(count > 0);
#if __GNUC__
// Use standard memory copy
memcpy(rowptr, buffer, length);
#else
// Copy a multiple of 16 byte blocks
__asm
{
mov eax, rowptr // Load the pointer to the destination
mov ebx, buffer // Load the pointer to the source
mov ecx, count // Load the number of 16-byte blocks to copy
loop: movdqa xmm0, [ebx] // Load 16 bytes from the source
movntdq [eax], xmm0 // Copy 16 bytes to the destination
add eax, 16 // Advance to the group of 16 bytes
add ebx, 16
sub ecx, 1 // Decrement the number of blocks to copy
jg loop
}
#endif
}
#endif
// DecodeBandFSMBuffered is no longer used
#if 0 //dan20041030 not used
// Decode one subband of highpass coefficients with the Huffman finite state
// machine, staging each row in a cache-aligned scratch buffer that is flushed
// to the image one row at a time (CopyRowBuffer) as runs cross row boundaries.
//
// fsm             - Huffman decoder state machine; reset before returning
// stream          - input bitstream; one byte (two 4-bit chunks) per iteration
// image           - destination subband: 'height' rows of 'pitch' bytes
// width           - row width in coefficients
// quantization    - multiplier applied to every decoded magnitude
// decoding_buffer - cache-line-aligned scratch holding one aligned row
//
// Returns true once the BAND_END_TRAILER codeword is decoded; any rows not
// yet reached are zeroed. Each FSM entry encodes a leading zero run
// (pre_skip), up to two magnitudes (value0/value1), and a trailing zero run
// (post_skip); 'column' tracks the write position and wraps across rows.
// NOTE(review): dead code — the enclosing #if 0 says "no longer used".
bool DecodeBandFSMBuffered(FSM *fsm, BITSTREAM *stream, PIXEL *image,
int width, int height, int pitch,
int quantization, char *decoding_buffer, size_t decoding_buffer_size)
{
char *rowptr = (char *)image; // Pointer to current row
char *maxptr = rowptr + height * pitch; // Address of row after the last row
FSMENTRY *entry;
int index;
int byte;
int column = 0;
int32_t value;
size_t row_size;
size_t cache_row_size; // Size of a row in bytes
int cache_line_count; // Size of the buffer in cache lines
PIXEL *buffer; // Pixel pointer to the buffer
int length; // Length of row in bytes
// Check that the processing size allows two chunks per byte
assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
// The bitstream buffer should be empty
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
// Compute the number of cache lines used in the buffer
row_size = width * sizeof(PIXEL);
cache_row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
cache_line_count = (cache_row_size >> _CACHE_LINE_SHIFT);
// Check that the buffer is large enough
assert(decoding_buffer != NULL && decoding_buffer_size >= cache_row_size);
// Check that the buffer starts on a cache line boundary
assert(ISALIGNED(decoding_buffer, _CACHE_LINE_SIZE));
// This routine assumes that the rows are contiguous and the pitch is a multiple of 16 bytes
length = pitch;
assert(length == ALIGN(row_size, 16));
// Cast the buffer pointer for pixel access
buffer = (PIXEL *)decoding_buffer;
// Zero the decoding buffer
ZeroHighPassBuffer(buffer, cache_line_count);
// Decode runs and magnitude values until the band end trailer is decoded
for (;;)
{
// Read a byte from the bitstream
byte = GetFastByte(stream);
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return when the entire band is decoded
if (entry->value0 == BAND_END_TRAILER)
{
// Copy the buffer to the row if not already beyond the band
if (rowptr < maxptr) CopyRowBuffer(rowptr, buffer, length);
// Advance to the next row
rowptr += pitch;
// Zero the remaining rows in the subband
while (rowptr < maxptr) {
ZeroHighPassRow((PIXEL *)rowptr, length);
rowptr += pitch;
}
// Reset the finite state machine to the root node in the Huffman tree
ResetFSM(fsm);
// Return indication that the band was fully decoded
return true;
}
// Set the finite state machine to the next state in the Huffman tree
UpdateFSM(fsm, entry->next_state);
// No magnitude values decoded?
if (entry->value0 == 0)
{
// No magnitudes decoded so just advance the column pointer
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
}
}
// Only one magnitude value decoded?
else if (entry->value1 == 0)
{
// Process the magnitude value that was decoded
// Undo quantization and scaling
value = quantization * entry->value0;
// Advance to the column where the value should be placed
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
}
// Fill in the decoded magnitude
// Check the column before storing the value
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
buffer[column] = SATURATE(value);
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
}
}
else // Two magnitude values were decoded
{
// Check the column before storing values
assert(0 <= column && column < width);
if (column < width - 1) {
// Dequantize and store the first value
value = quantization * entry->value0;
buffer[column++] = SATURATE(value);
// Dequantize and store the second value
value = quantization * entry->value1;
buffer[column++] = SATURATE(value);
}
else {
// Dequantize and store the first value in the current row
value = quantization * entry->value0;
buffer[column] = SATURATE(value);
// Dequantize the second value
value = quantization * entry->value1;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
// Reset the column to the beginning of the row
column = 0;
// Store the second value in the new row
buffer[column++] = SATURATE(value);
}
}
// Decode the second 4-bit chunk
index = byte & FSM_INDEX_MASK;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
// Copy the buffer to the row if not already beyond the band
if (rowptr < maxptr) CopyRowBuffer(rowptr, buffer, length);
// Advance to the next row
rowptr += pitch;
// Zero the remaining rows in the subband
while (rowptr < maxptr) {
ZeroHighPassRow((PIXEL *)rowptr, length);
rowptr += pitch;
}
// Reset the finite state machine to the root node in the Huffman tree
ResetFSM(fsm);
// Return indication that the band was fully decoded
return true;
}
// Set the finite state machine to the next state in the Huffman tree
UpdateFSM(fsm, (int)entry->next_state);
// If no magnitude value is decoded
if (entry->value0 == 0) {
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
}
}
// If there is only one decoded magnitude value
else if (entry->value1 == 0) {
// Undo quantization and scaling
int32_t value = quantization * entry->value0;
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
}
// Fill in the decoded magnitude
// Check the column before storing the value
//assert(index < width);
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
buffer[column] = SATURATE(value);
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
}
}
// If there are two decoded magnitude values
else
{
// Check the column before storing values
assert(0 <= column && column < width);
if (column < width-1) {
value = quantization * entry->value0;
buffer[column++] = SATURATE(value);
value = quantization * entry->value1;
buffer[column++] = SATURATE(value);
}
else {
value = quantization * entry->value0;
buffer[column] = SATURATE(value);
value = quantization * entry->value1;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
// Reset the column to the beginning of the row
column = 0;
buffer[column++] = SATURATE(value);
}
}
}
}
#endif
#if 0 //dan20041030 not used
// Decode a subband using FSM, combine the two results decoded from one byte
// Decode a subband of highpass coefficients using the FSM Huffman decoder,
// looking up BOTH 4-bit chunks of each input byte before applying either
// result, then combining the two table entries (entry1/entry2) case by case.
//
// fsm          - Huffman decoder state machine; reset on band-end
// stream       - input bitstream, read one byte at a time
// image        - destination subband (PIXEL rows); pitch is in PIXELs here
// width/height - subband dimensions in coefficients
// quantization - multiplier applied to every decoded magnitude
//
// Returns true when either chunk decodes BAND_END_TRAILER. The nested cases
// enumerate how many magnitudes each chunk produced (0, 1, or 2) because the
// pre/post skip of the first entry must be applied before the second entry's
// values are placed. NOTE(review): dead code under #if 0 ("not used");
// locals 'row', 'skip' and 'bytes_row_size' are set but never read.
bool DecodeBandFSMCombined(FSM *fsm, BITSTREAM *stream, PIXEL *image, int width, int height, int pitch, int quantization)
{
int index, skip;
uint8_t byte;
FSMENTRY *entry1, *entry2;
PIXEL *rowptr = image;
int row = 0, column = 0;
int32_t value,bytes_row_size = width*sizeof(PIXEL);
PIXEL *maxptr = rowptr + height*pitch;
// This Huffman decoder assumes each byte is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
ZeroHighPassRow(rowptr, width);
// Double check that the bitstream buffer is empty
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
// Decode runs and magnitude values until the band end trailer is decoded
for (;;)
{
// Read a byte from the bitstream
//byte = GetBits(stream, BITSTREAM_WORD_SIZE);
#if 0
byte = GetByte(stream);
if (stream->error != BITSTREAM_ERROR_OKAY) {
stream->error = VLC_ERROR_NOTFOUND;
return false;
}
#else
// Inline of the third case of GetByte
uint8_t *lpCurrentWord = stream->lpCurrentWord;
// Get the next byte from the bitstream
byte = (uint32_t )(*(lpCurrentWord++));
// Update the state of the bitstream
stream->lpCurrentWord = lpCurrentWord;
// Check that the high bits are zero
assert((byte & ~BITMASK(8)) == 0);
#endif
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
entry1 = GetFSMTableEntry(fsm, index);
UpdateFSM(fsm, entry1->next_state);
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
entry2 = GetFSMTableEntry(fsm, index);
UpdateFSM(fsm, entry2->next_state);
// Return when the subband is completely decoded
if(entry1->value0 == BAND_END_TRAILER || entry2->value0 == BAND_END_TRAILER) {
ResetFSM(fsm);
return true;
}
// If no magnitude value is decoded at the first step
if (entry1->value0 == 0) {
// If no magnitude is decoded at the second step
if(entry2->value0 == 0) {
column += entry1->pre_skip+entry2->pre_skip;
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
}
// If one magnitude is decoded at the second step
else if(entry2->value1 == 0) {
// Skip to the non-zero position
column += entry1->pre_skip+entry2->pre_skip;
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
// Fill in the decoded magnitude
// Undo quantization and scaling
value = quantization * entry2->value0;
// Check the column before storing the value
//assert(index < width);
assert(0 <= column && column < width);
// Store the saturated value
rowptr[column] = SATURATE(value);
column += entry2->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
}
// If two magnitudes are decoded at the second step
else {
column += entry1->pre_skip;
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
// Check the column before storing values
assert(0 <= column && column < width);
if(column < width-1) {
value = quantization * entry2->value0;
rowptr[column++] = SATURATE(value);
value = quantization * entry2->value1;
rowptr[column++] = SATURATE(value);
}
else {
value = quantization * entry2->value0;
rowptr[column] = SATURATE(value);
value = quantization * entry2->value1;
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
column = 0;
rowptr[column++] = SATURATE(value);
}
}
}
// If only one magnitude is decoded at the first step
else if(entry1->value1 == 0) {
// Undo quantization and scaling
value = quantization * entry1->value0;
column += entry1->pre_skip;
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
// Fill in the decoded magnitude
// Check the column before storing the value
//assert(index < width);
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
rowptr[column] = SATURATE(value);
// If no magnitude is decoded at the second step
if(entry2->value0 == 0) {
column += entry1->post_skip+entry2->pre_skip;
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
}
// If one magnitude is decoded at the second step
else if (entry2->value1 == 0)
{
// Undo quantization and scaling
value = quantization * entry2->value0;
column += entry1->post_skip+entry2->pre_skip;
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
// Fill in the decoded magnitude
// Check the column before storing the value
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
rowptr[column] = SATURATE(value);
column += entry2->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
}
// If two magnitudes are decoded at the second step
else
{
column += entry1->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
// Check the column before storing values
assert(0 <= column && column < width);
if(column < width-1) {
value = quantization * entry2->value0;
rowptr[column++] = SATURATE(value);
value = quantization * entry2->value1;
rowptr[column++] = SATURATE(value);
}
else {
value = quantization * entry2->value0;
rowptr[column] = SATURATE(value);
value = quantization * entry2->value1;
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
column = 0;
rowptr[column++] = SATURATE(value);
}
}
}
// If two magnitudes are decoded at the first step
else {
// Check the column before storing values
assert(0 <= column && column < width);
if(column < width-1) {
value = quantization * entry1->value0;
rowptr[column++] = SATURATE(value);
value = quantization * entry1->value1;
rowptr[column++] = SATURATE(value);
}
else {
value = quantization * entry1->value0;
rowptr[column] = SATURATE(value);
value = quantization * entry1->value1;
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
column = 0;
rowptr[column++] = SATURATE(value);
}
// If two magnitudes are decoded at the first step
// then at most one more magnitude can be decoded at the second step
assert(entry2->value1 == 0);
// If no magnitude is decoded at the second step
if(entry2->value0 == 0) {
column += entry2->pre_skip; // entry2->pre_skip <=4 must be true
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
}
// If one magnitude is decoded at the second step
else {
column += entry2->pre_skip; // must be a small zero run
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if (rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
// Fill in the decoded magnitude
// Undo quantization and scaling
value = quantization * entry2->value0;
// Check the column before storing the value
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
rowptr[column] = SATURATE(value);
column += entry2->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if (rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
}
}
}
}
#endif
#if 0 //dan20041030 not used
// Decode a subband using FSM. One byte is read from the bitstream each time and decoded in two steps
// Original version that does not use a separate buffer for decoding
// Decode a subband directly into 8-bit signed pixels using the FSM Huffman
// decoder. Each input byte is split into two 4-bit chunks that are decoded
// sequentially; magnitudes are stored with SATURATE8S and no dequantization
// multiplier is applied (unlike the PIXEL/16-bit variants in this file).
//
// fsm          - Huffman decoder state machine; reset on band-end
// stream       - input bitstream, read one byte at a time
// image        - destination subband of PIXEL8S; pitch converted to pixels
// width/height - subband dimensions in coefficients
//
// Returns true when BAND_END_TRAILER is decoded; all rows past the current
// one are zeroed before returning. NOTE(review): dead code under #if 0
// ("not used"); ZeroHighPassRow is called with a byte length here while
// DecodeBandFSMCombined passes a width in pixels — confirm which unit that
// helper expects before reviving this path.
bool DecodeBandFSM8s(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
int index, byte;
FSMENTRY *entry;
PIXEL8S *rowptr = image;
int column = 0;
int32_t value;
PIXEL8S *maxptr;
int length = width * sizeof(PIXEL8S);
//ROI roi = {width, 1};
// This version of Huffman decoder assumes that one byte
// is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
// Convert the pitch to units of pixels
pitch /= sizeof(PIXEL8S);
// Compute the address of the row after the last row in the band
maxptr = rowptr + height * pitch;
// Round up the row length (in bytes) to a multiple of 16 bytes
length = ALIGN16(length);
ZeroHighPassRow((PIXEL *)rowptr, length);
// Decode runs and magnitude values until the band end trailer is decoded
for (;;)
{
// Read a byte from the bitstream
byte = GetFastByte(stream);
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return when the entire band is decoded
if (entry->value0 == BAND_END_TRAILER) {
// Zero out the whole subband from here on
rowptr += pitch;
while(rowptr < maxptr) {
ZeroHighPassRow((PIXEL *)rowptr, length);
rowptr += pitch;
}
ResetFSM(fsm);
return true;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// If no magnitude value is decoded
if (entry->value0 == 0)
{
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
}
// If there is only one decoded magnitude value
else if(entry->value1 == 0)
{
value = entry->value0;
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
// Fill in the decoded magnitude
// Check the column before storing the value
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
rowptr[column] = SATURATE8S(value);
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
}
// If there are two decoded magnitude values
else
{
// Check the column before storing values
assert(0 <= column && column < width);
if(column < width-1) {
value = entry->value0;
rowptr[column++] = SATURATE8S(value);
value = entry->value1;
rowptr[column++] = SATURATE8S(value);
}
else {
value = entry->value0;
rowptr[column] = SATURATE8S(value);
value = entry->value1;
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
column = 0;
rowptr[column++] = SATURATE8S(value);
}
}
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
// Zero out the whole subband from here on
rowptr += pitch;
while(rowptr < maxptr) {
ZeroHighPassRow((PIXEL *)rowptr, length);
rowptr += pitch;
}
ResetFSM(fsm);
return true;
}
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// If no magnitude value is decoded
if (entry->value0 == 0)
{
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
}
// If there is only one decoded magnitude value
else if (entry->value1 == 0)
{
value = entry->value0;
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
// Fill in the decoded magnitude
// Check the column before storing the value
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
rowptr[column] = SATURATE8S(value);
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
}
// If there are two decoded magnitude values
else
{
// Check the column before storing values
assert(0 <= column && column < width);
if(column < width-1) {
value = entry->value0;
rowptr[column++] = SATURATE8S(value);
value = entry->value1;
rowptr[column++] = SATURATE8S(value);
}
else {
value = entry->value0;
rowptr[column] = SATURATE8S(value);
value = entry->value1;
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
column = 0;
rowptr[column++] = SATURATE8S(value);
}
}
}
}
#endif
// same as DecodeBandFSM8sNoGap but output to 16bit data
// Decode a subband into 16-bit pixels in TWO passes over the bitstream.
// The band is treated as one contiguous run (rows are "no gap": pitch is
// assumed to equal the row length), so each FSM entry's packed skip field
// (pre_post_skip: low 12 bits = pre-skip, high bits = post-skip) advances a
// single flat pointer instead of tracking column/row.
//
// Pass 1 stores each decoded magnitude pair directly (low-order data);
// pass 2, after re-aligning the stream and skipping a 4-byte marker, ORs
// each magnitude shifted left by 8 into the same positions (high byte).
//
// fsm          - Huffman decoder state machine; reset before each pass
// stream       - input bitstream; both passes consume band data from it
// image        - destination subband of PIXEL16S, zeroed up front
// quant        - NOTE(review): unused in this function — confirm whether
//                dequantization happens downstream or the parameter is vestigial
//
// Returns true on BAND_END_TRAILER (or, with ERROR_TOLERANT, after rewinding
// the stream and skipping the subband on overrun); false only if image is NULL.
// NOTE(review): rowptr is declared PIXEL* but assigned from PIXEL16S* —
// presumably the two typedefs are the same 16-bit type; verify.
bool DecodeBandFSM16sNoGap2Pass(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant)
{
int index, byte;
FSMENTRY *entry;
PIXEL *rowptr = (PIXEL *)image;
PIXEL16S *bandendptr;
int value;
#if ERROR_TOLERANT
// Remember the stream position so a corrupt band can be skipped cleanly
uint8_t *startCurrentWord = stream->lpCurrentWord;
int32_t startWordsUsed = stream->nWordsUsed;
#endif
#if _FSMBUFFER
__declspec(align(32)) FSMENTRY buffer;
#endif
if (image == NULL) {
return false;
}
// Reset the decoder
ResetFSM(fsm);
pitch /= sizeof(PIXEL16S);
// Zero out the entire subband
ZeroHighPassRow((PIXEL *)rowptr, pitch*height*sizeof(PIXEL16S));
// This Huffman decoder assumes each byte is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
bandendptr = rowptr + height * pitch;
#if 0 // test for errors.
{
if((rand() % 10) == 1)
stream->lpCurrentWord[rand()%50] ^= 1;
}
#endif
// Decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
while((intptr_t)bandendptr - (intptr_t)rowptr >= 0)
#else
for (;;)
#endif
{
// Read a byte from the bitstream
#if ERROR_TOLERANT
if(stream->nWordsUsed)
{
byte = GetFastByte(stream);
}
else
{
break;
}
#else
byte = GetFastByte(stream);
#endif
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
goto SecondPass;
}
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude
value = entry->value0;
rowptr[0] = value;//SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = value;//SATURATE(value);
// Skip the appropriate distance
rowptr = &rowptr[entry->pre_post_skip >> 12];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
goto SecondPass;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude
value = entry->value0;
rowptr[0] = value;//SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = value;//SATURATE(value);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip >> 12];
}
SecondPass:
// Restart at the top of the band and re-align the bitstream, then step
// past a 4-byte marker before decoding the high bytes
rowptr = (PIXEL16S *)image;
AlignBits(stream);
AlignBitsTag(stream);
stream->lpCurrentWord += 4;
stream->nWordsUsed -= 4;
// Decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
while((intptr_t)bandendptr - (intptr_t)rowptr >= 0)
#else
for (;;)
#endif
{
// Read a byte from the bitstream
#if ERROR_TOLERANT
if(stream->nWordsUsed)
{
byte = GetFastByte(stream);
}
else
{
break;
}
#else
byte = GetFastByte(stream);
#endif
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude
value = entry->value0;
rowptr[0] |= value << 8;
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] |= value << 8;
// Skip the appropriate distance
rowptr = &rowptr[entry->pre_post_skip >> 12];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude
value = entry->value0;
rowptr[0] |= value << 8;
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] |= value << 8;
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip >> 12];
}
#if ERROR_TOLERANT
// Error recovery: the loop ran off the end of the band or the stream.
// Reset the decoder
ResetFSM(fsm);
// Backup the bitstream to the beginning of the band
stream->lpCurrentWord = startCurrentWord;
stream->nWordsUsed = startWordsUsed;
#if 0
AlignBitsTag(stream);
// Read the debugging marker
{
TAGVALUE segment;
do
{
segment = GetTagValue(stream);
}
while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER);
stream->lpCurrentWord -= 4;
stream->nWordsUsed += 4;
}
#else
SkipSubband(stream);
#endif
#endif
return true;
}
// Same as DecodeBandFSM8sNoGap but output to 16bit data
//
// Decode one highpass subband with the finite state machine (FSM) Huffman
// decoder, writing decoded coefficients as 16-bit signed pixels.  Each byte
// read from the bitstream is processed as two 4-bit chunks.  The band is
// assumed to have been encoded without a gap between the row width and the
// pitch, so zero runs may cross row boundaries and no end-of-row checks are
// needed.  Decoding proceeds in two phases: a fast loop that reads bytes
// directly from the bitstream buffer (stopping 500 pixels before the end of
// the band so it can never overrun), followed by a careful loop that uses
// GetFastByte() and checks for the band end trailer.
//
// Returns true when the band end trailer has been decoded (or, when
// ERROR_TOLERANT is enabled, after recovering from a corrupted bitstream by
// rewinding and skipping the subband).
#if _DEBUG
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, FILE *logfile)
#else
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch)
#endif
{
    int index, byte;
    FSMENTRY *entry;
    FSMENTRYFAST *entryfast;
    PIXEL16S *rowptr = image;
    PIXEL16S *bandendptr;       // One past the last pixel in the band
    PIXEL16S *fastendptr;       // End of the region handled by the fast loop
    int32_t value;
    // Remember the starting bitstream position so it can be restored if the
    // bitstream turns out to be corrupted (ERROR_TOLERANT recovery path)
    uint8_t *startCurrentWord = stream->lpCurrentWord;
    uint8_t *CurrentWord = stream->lpCurrentWord;
    int32_t startWordsUsed = stream->nWordsUsed;
    ptrdiff_t offset;
#if _FSMBUFFER
    __declspec(align(32)) FSMENTRY buffer;
#endif

#if (0 && DEBUG)
    DebugOutputBitstreamPosition(stream);
    DebugOutputBitstreamBytes(stream, 16);
#endif

    // Reset the decoder
    ResetFSM(fsm);

#if (0 && DEBUG)
    DebugOutputFSM(fsm);
#endif

    // Convert the pitch from bytes to pixels
    pitch /= sizeof(PIXEL16S);

    // Zero out the entire subband (only non-zero coefficients are written)
    ZeroHighPassRow((PIXEL *)rowptr, pitch*height*sizeof(PIXEL16S));
    //memset(rowptr, 0, pitch*height*sizeof(PIXEL16S));

    // This Huffman decoder assumes each byte is processed as two 4-bit chunks
    assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
    assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

    bandendptr = rowptr + height * pitch;

#if 0 // test for errors.
    {
        if((rand() % 10) == 1)
            stream->lpCurrentWord[rand()%50] ^= 1;
    }
#endif

    // The fast loop stops 500 pixels short of the band end so that the
    // unchecked skips below cannot write past the end of the band
    fastendptr = bandendptr;
    fastendptr -= 500;

    // Decode runs and magnitude values until the entire band is decoded
    while(rowptr < fastendptr)
    {
        // Read a byte from the bitstream
        byte = *CurrentWord++;

        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;

        // Index into the lookup table at that state
        entryfast = (FSMENTRYFAST *)GetFSMTableEntry(fsm, index);

#if (0 && DEBUG)
        //DebugOutputFSMEntryFast(fsm, index, entryfast);
        PrintFSMEntryFast(fsm, index, entryfast, logfile);
#endif
        // Set the pointer to the next state
        UpdateFSM(fsm, (int)entryfast->next_state);

        // Skip the decoded zero runs (low 12 bits hold the pre-skip)
        rowptr = &rowptr[entryfast->pre_post_skip & 0xfff];

        // Write down the first decoded magnitude (both values in one store)
        *((uint32_t *)rowptr) = entryfast->values;

        // Skip the appropriate distance (high 4 bits hold the post-skip)
        rowptr = &rowptr[entryfast->pre_post_skip >> 12];

        // decode the second 4-bit chunk
        index = byte & ((1<<FSM_INDEX_SIZE)-1);

        // Index into the lookup table at that state
        entryfast = (FSMENTRYFAST *)GetFSMTableEntry(fsm, index);

#if (0 && DEBUG)
        //DebugOutputFSMEntryFast(fsm, index, entryfast);
        PrintFSMEntryFast(fsm, index, entryfast, logfile);
#endif
        // set the pointer to the next state
        UpdateFSM(fsm, (int)entryfast->next_state);

        // Skip the decoded zero runs
        rowptr = &rowptr[entryfast->pre_post_skip & 0xfff];

        // Write down the first decoded magnitude
        *((uint32_t *)rowptr) = entryfast->values;

        // Skip the decoded zero runs
        rowptr = &rowptr[entryfast->pre_post_skip >> 12];
    }

    // Resynchronize the bitstream with the bytes consumed by the fast loop
    offset = CurrentWord - startCurrentWord;
    stream->lpCurrentWord += offset;
    stream->nWordsUsed -= (int)offset;

    // Decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
    while(bandendptr >= rowptr)
#else
    for (;;)
#endif
    {
#if (0 && DEBUG)
        if (!(rowptr < bandendptr)) {
            return true;
        }
#endif
#if (0 && DEBUG)
        PrintBitstreamPosition(stream, logfile);
#endif
        // Read a byte from the bitstream
#if ERROR_TOLERANT
        if(stream->nWordsUsed)
        {
            byte = GetFastByte(stream);
        }
        else
        {
            break;
        }
#else
        byte = GetFastByte(stream);
#endif
        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

#if (0 && DEBUG)
        //DebugOutputFSMEntry(fsm, index, entry);
        PrintFSMEntry(fsm, index, entry, logfile);
#endif

#if _FSMBUFFER
        memcpy(&buffer, entry, sizeof(FSMENTRY));
        entry = &buffer;
#endif
        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER)
        {
            assert(rowptr <= bandendptr);
            ResetFSM(fsm);
            return true;
        }

        // Set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip & 0xfff];

        // Write down the first decoded magnitude
        if ((value = entry->value0)) {
            rowptr[0] = value;//SATURATE(value);
        }

        // Write down the second decoded magnitude
        if ((value = entry->value1)) {
            rowptr[1] = value;//SATURATE(value);
        }

        // Skip the appropriate distance
        rowptr = &rowptr[entry->pre_post_skip >> 12];

        // decode the second 4-bit chunk
        index = byte & ((1<<FSM_INDEX_SIZE)-1);

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

#if (0 && DEBUG)
        //DebugOutputFSMEntry(fsm, index, entry);
        PrintFSMEntry(fsm, index, entry, logfile);
#endif

#if _FSMBUFFER
        memcpy(&buffer, entry, sizeof(FSMENTRY));
        entry = &buffer;
#endif
        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER)
        {
            assert(rowptr <= bandendptr);
            ResetFSM(fsm);
            return true;
        }

        // set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip & 0xfff];

        // Write down the first decoded magnitude
        if ((value = entry->value0)) {
            rowptr[0] = value;//SATURATE(value);
        }

        // Write down the second decoded magnitude
        if ((value = entry->value1)) {
            rowptr[1] = value;//SATURATE(value);
        }

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip >> 12];
    }

#if ERROR_TOLERANT
    // The loop exited without finding the band end trailer: the bitstream is
    // presumably corrupted, so rewind to the start of the band and skip it

    // Reset the decoder
    ResetFSM(fsm);

    // Backup the bitstream to the beginning of the band
    stream->lpCurrentWord = startCurrentWord;
    stream->nWordsUsed = startWordsUsed;

#if 0
    AlignBitsTag(stream);
    // Read the debugging marker
    {
        TAGVALUE segment;
        do
        {
            segment = GetTagValue(stream);
        }
        while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER);
        stream->lpCurrentWord -= 4;
        stream->nWordsUsed += 4;
    }
#else
    SkipSubband(stream);
#endif
#endif

    return true;
}
// Same as DecodeBandFSM16sNoGap, but large coefficients were encoded as
// escape values: when a decoded magnitude exceeds the peak threshold level,
// the true coefficient is taken from the peaks table instead and divided by
// the quantization factor.  Uses the same two-phase structure (unchecked fast
// loop, then careful loop with trailer detection and ERROR_TOLERANT recovery).
bool DecodeBandFSM16sNoGapWithPeaks(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, PIXEL *peaks, int level, int quant)
{
    int index, byte;
    FSMENTRY *entry;
    PIXEL16S *rowptr = image;
    PIXEL16S *bandendptr;       // One past the last pixel in the band
    PIXEL16S *fastendptr;       // End of the region handled by the fast loop
    int32_t value;
    // Remember the starting bitstream position for error recovery
    uint8_t *startCurrentWord = stream->lpCurrentWord;
    uint8_t *CurrentWord = stream->lpCurrentWord;
    int32_t startWordsUsed = stream->nWordsUsed;
#if _FSMBUFFER
    __declspec(align(32)) FSMENTRY buffer;
#endif

    // Reset the decoder
    ResetFSM(fsm);

    // This is called with a non-prequantized FSM, so scale the peak
    // threshold to match the quantized magnitudes coming out of the FSM
    if(quant>1) level /= quant;

    // Convert the pitch from bytes to pixels
    pitch /= sizeof(PIXEL16S);

    // Zero out the entire subband
    ZeroHighPassRow((PIXEL *)rowptr, pitch*height*sizeof(PIXEL16S));

    // This Huffman decoder assumes each byte is processed as two 4-bit chunks
    assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
    assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

    bandendptr = rowptr + height * pitch;

#if 0 // test for errors.
    {
        if((rand() % 10) == 1)
            stream->lpCurrentWord[rand()%50] ^= 1;
    }
#endif

    // The fast loop stops 1000 pixels short of the band end so that the
    // unchecked skips below cannot write past the end of the band
    fastendptr = bandendptr;
    fastendptr -= 1000;

    // Decode runs and magnitude values until the entire band is decoded
    while(rowptr < fastendptr)
    {
        // Read a byte from the bitstream
        byte = *CurrentWord++;

        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

        // Set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // Skip the decoded zero runs (low 12 bits hold the pre-skip)
        rowptr = &rowptr[entry->pre_post_skip & 0xfff];

        // Write down the first decoded magnitude, substituting the next
        // peak value when the magnitude exceeds the escape threshold
        value = entry->value0;
        if(abs(value) > level)
            rowptr[0] = *peaks++ / quant;
        else
            rowptr[0] = value;//SATURATE(value);

        value = entry->value1;
        rowptr[1] = value;//SATURATE(value);

        // Skip the appropriate distance (high 4 bits hold the post-skip)
        rowptr = &rowptr[entry->pre_post_skip >> 12];

        // decode the second 4-bit chunk
        index = byte & ((1<<FSM_INDEX_SIZE)-1);

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

        // set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip & 0xfff];

        // Write down the first decoded magnitude
        value = entry->value0;
        if(abs(value) > level)
            rowptr[0] = *peaks++ / quant;
        else
            rowptr[0] = value;//SATURATE(value);

        value = entry->value1;
        rowptr[1] = value;//SATURATE(value);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip >> 12];
    }

    // Resynchronize the bitstream with the bytes consumed by the fast loop
    stream->lpCurrentWord += ((intptr_t)CurrentWord - (intptr_t)startCurrentWord);
    stream->nWordsUsed -= (int)(((intptr_t)CurrentWord - (intptr_t)startCurrentWord));

    // Decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
    while(((intptr_t)bandendptr - (intptr_t)rowptr) >= 0)
#else
    for (;;)
#endif
    {
#if (0 && DEBUG)
        if (!(rowptr < bandendptr)) {
            return true;
        }
#endif
        // Read a byte from the bitstream
#if ERROR_TOLERANT
        if(stream->nWordsUsed)
        {
            byte = GetFastByte(stream);
        }
        else
        {
            break;
        }
#else
        byte = GetFastByte(stream);
#endif
        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
        memcpy(&buffer, entry, sizeof(FSMENTRY));
        entry = &buffer;
#endif
        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER)
        {
            assert(rowptr <= bandendptr);
            ResetFSM(fsm);
            return true;
        }

        // Set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip & 0xfff];

        // Write down the first decoded magnitude
        value = entry->value0;
        if(abs(value) > level)
            rowptr[0] = *peaks++ / quant;
        else
            rowptr[0] = value;//SATURATE(value);

        // Write down the second decoded magnitude
        value = entry->value1;
        rowptr[1] = value;//SATURATE(value);

        // Skip the appropriate distance
        rowptr = &rowptr[entry->pre_post_skip >> 12];

        // decode the second 4-bit chunk
        index = byte & ((1<<FSM_INDEX_SIZE)-1);

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
        memcpy(&buffer, entry, sizeof(FSMENTRY));
        entry = &buffer;
#endif
        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER)
        {
            assert(rowptr <= bandendptr);
            ResetFSM(fsm);
            return true;
        }

        // set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip & 0xfff];

        // Write down the first decoded magnitude
        value = entry->value0;
        if(abs(value) > level)
            rowptr[0] = *peaks++ / quant;
        else
            rowptr[0] = value;//SATURATE(value);

        // Write down the second decoded magnitude
        value = entry->value1;
        rowptr[1] = value;//SATURATE(value);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip >> 12];
    }

#if ERROR_TOLERANT
    // The loop exited without finding the band end trailer: the bitstream is
    // presumably corrupted, so rewind to the start of the band and skip it

    // Reset the decoder
    ResetFSM(fsm);

    // Backup the bitstream to the beginning of the band
    stream->lpCurrentWord = startCurrentWord;
    stream->nWordsUsed = startWordsUsed;

#if 0
    AlignBitsTag(stream);
    // Read the debugging marker
    {
        TAGVALUE segment;
        do
        {
            segment = GetTagValue(stream);
        }
        while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER);
        stream->lpCurrentWord -= 4;
        stream->nWordsUsed += 4;
    }
#else
    SkipSubband(stream);
#endif
#endif

    return true;
}
// This version of DecodeBandFSM() assumes that the gap between width and pitch has been coded as
// zero runs. Therefore decoded magnitude values can be written down without the need to check
// if the end of a row has been reached. Hence the total number of conditionals in DecodeBandFSM
// can be significantly reduced.
// Decode a subband using FSM. One byte is read from the bitstream each time and decoded in two steps
// Original version that does not use a separate buffer for decoding
#if !_INDIVIDUAL_ENTRY
#if 0 //dan20041030 not used
// NOTE: this variant is compiled out (#if 0 above) — kept for reference.
//
// Decode one subband with the FSM Huffman decoder, writing saturated 8-bit
// signed coefficients.  Each byte from the bitstream is decoded as two 4-bit
// chunks; runs may cross row boundaries because the gap between width and
// pitch was encoded as zero runs.  Returns true when the band end trailer
// has been decoded.
bool DecodeBandFSM8sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
    int index, byte;
    FSMENTRY *entry;
    PIXEL8S *rowptr = image;
    PIXEL8S *bandendptr;        // One past the last pixel in the band
    int32_t value;
#if _FSMBUFFER
    __declspec(align(32)) FSMENTRY buffer;
#endif

    // Convert the pitch from bytes to pixels
    pitch /= sizeof(PIXEL8S);

    // Zero out the entire subband
    ZeroHighPassRow((PIXEL *)rowptr, pitch*height);

    // This version of Huffman decoder assumes that one byte
    // is processed as two 4-bit chunks
    assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
    assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

    bandendptr = rowptr + height * pitch;

    // Decode runs and magnitude values until the entire band is decoded
    //while (rowptr < bandendptr)
    for (;;)
    {
#if (0 && DEBUG)
        if (!(rowptr < bandendptr)) {
            return true;
        }
#endif
        // Check that the decoder has not overrun the output array
        //assert(rowptr < bandendptr);

        // Read a byte from the bitstream
        byte = GetFastByte(stream);

        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
        memcpy(&buffer, entry, sizeof(FSMENTRY));
        entry = &buffer;
#endif

#if 1
        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER)
        {
            assert(rowptr <= bandendptr);
            ResetFSM(fsm);
            return true;
        }
#endif
        // Set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_skip];

        // Write down the first decoded magnitude
        value = entry->value0;
        rowptr[0] = SATURATE(value);

        // Write down the second decoded magnitude
        value = entry->value1;
        rowptr[1] = SATURATE(value);

        // Skip the appropriate distance
        rowptr = &rowptr[entry->post_skip];

        // decode the second 4-bit chunk
        index = byte & ((1<<FSM_INDEX_SIZE)-1);

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
        memcpy(&buffer, entry, sizeof(FSMENTRY));
        entry = &buffer;
#endif

#if 1
        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER)
        {
            assert(rowptr <= bandendptr);
            ResetFSM(fsm);
            return true;
        }
#endif
        // set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_skip];

        // Write down the first decoded magnitude
        value = entry->value0;
        rowptr[0] = SATURATE(value);

        // Write down the second decoded magnitude
        value = entry->value1;
        rowptr[1] = SATURATE(value);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->post_skip];
    }
}
#endif
#elif _SINGLE_FSM_TABLE
// Single-table variant of the FSM band decoder: all state tables live in one
// contiguous array (fsm->table->firstentry), so the entry for a given state
// and 4-bit index is found by direct arithmetic instead of a per-state table
// pointer.  Decodes saturated 8-bit signed coefficients; runs may cross row
// boundaries because the gap between width and pitch was encoded as zero runs.
// Returns true when the band end trailer has been decoded.
bool DecodeBandFSM8sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
    int index, byte, i;
    FSMENTRY *entry,*firstentry = fsm->table->firstentry;
    PIXEL8S *rowptr = image;
    PIXEL8S *bandendptr;        // One past the last pixel in the band
    int32_t value;

    // Convert the pitch from bytes to pixels
    pitch /= sizeof(PIXEL8S);

    // Zero out the entire subband
    ZeroHighPassRow((PIXEL *)rowptr, pitch*height);

    // Compute the end of the band for the overrun asserts below.
    // (Bug fix: this pointer was previously left uninitialized, so the
    // asserts compared against garbage in debug builds.)
    bandendptr = rowptr + height * pitch;

    // The Huffman decoder assumes each byte is processed as two 4-bit chunks
    assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
    assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

    // Decode runs and magnitude values until the entire band is decoded
    for (;;)
    {
        // Check that the decoder has not overrun the output array
        //assert(rowptr < bandendptr);

        // Read a byte from the bitstream
        byte = GetFastByte(stream);

        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;

        // Index into the lookup table at that state
        i = (fsm->next_state_index << FSM_INDEX_SIZE) | index;//DAN
        entry = firstentry+i; //DAN

        // Return if the subband is decoded completely
        if(entry->value0 == BAND_END_TRAILER)
        {
            assert(rowptr <= bandendptr);
            ResetFSMIndividual(fsm);
            return true;
        }

        // set the pointer to the next state
        UpdateFSMIndividual(fsm, (entry->next_state));

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_skip];

        // Write down the first decoded magnitude
        value = entry->value0;
        rowptr[0] = SATURATE(value);

        // Write down the second decoded magnitude
        value = entry->value1;
        rowptr[1] = SATURATE(value);

        // Skip the appropriate distance
        rowptr = &rowptr[entry->post_skip];

        // decode the second 4-bit chunk
        index = byte & ((1<<FSM_INDEX_SIZE)-1);

        // Index into the lookup table at that state
        i = (fsm->next_state_index << FSM_INDEX_SIZE) | index;//DAN
        entry = firstentry+i; //DAN

        // Return if the subband is decoded completely
        if(entry->value0 == BAND_END_TRAILER)
        {
            assert(rowptr <= bandendptr);
            ResetFSMIndividual(fsm);
            return true;
        }

        // set the pointer to the next state
        UpdateFSMIndividual(fsm, (entry->next_state));

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_skip];

        // Write down the first decoded magnitude
        value = entry->value0;
        rowptr[0] = SATURATE(value);

        // Write down the second decoded magnitude
        value = entry->value1;
        rowptr[1] = SATURATE(value);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->post_skip];
    }
}
#else
// Individual-entry variant of the FSM band decoder: table entries are looked
// up through GetFSMTableEntryIndividual(), which returns NULL when the band
// end trailer is reached.  Decodes saturated 8-bit signed coefficients; runs
// may cross row boundaries because the gap between width and pitch was
// encoded as zero runs.  Returns true when the band end trailer is decoded.
bool DecodeBandFSM8sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
    int index, byte;
    FSMENTRY *entry;
    PIXEL8S *rowptr = image;
    PIXEL8S *bandendptr;        // One past the last pixel in the band
    int32_t value;
    // Removed an unused aligned FSMENTRY buffer that was never referenced
    // in this variant (no _FSMBUFFER copy is performed here)

    // Convert the pitch from bytes to pixels
    pitch /= sizeof(PIXEL8S);

    // zero out the entire subband
    ZeroHighPassRow((PIXEL *)rowptr, pitch*height);

    // The Huffman decoder assumes each byte is processed as two 4-bit chunks
    assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
    assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

    bandendptr = rowptr + height * pitch;

    // Decode runs and magnitude values until the entire band is decoded
    for (;;)
    {
#if (0 && DEBUG)
        if (!(rowptr < bandendptr)) {
            return true;
        }
#endif
        // Read a byte from the bitstream
        byte = GetFastByte(stream);

        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;

        // Index into the lookup table at that state
        entry = GetFSMTableEntryIndividual(fsm, index);

        // Return if the subband is decoded completely
        if(entry == NULL)
        {
            assert(rowptr <= bandendptr);
            ResetFSMIndividual(fsm);
            return true;
        }

        // Set the pointer to the next state
        UpdateFSMIndividual(fsm, (entry->next_state));

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_skip];

        // Write down the first decoded magnitude
        value = entry->value0;
        rowptr[0] = SATURATE(value);

        // Write down the second decoded magnitude
        value = entry->value1;
        rowptr[1] = SATURATE(value);

        // Skip the appropriate distance
        rowptr = &rowptr[entry->post_skip];

        // decode the second 4-bit chunk
        index = byte & ((1<<FSM_INDEX_SIZE)-1);

        // Index into the lookup table at that state
        entry = GetFSMTableEntryIndividual(fsm, index);

        // Return if the subband is decoded completely
        if (entry == NULL)
        {
            assert(rowptr <= bandendptr);
            ResetFSMIndividual(fsm);
            return true;
        }

        // Set the pointer to the next state
        UpdateFSMIndividual(fsm, (entry->next_state));

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_skip];

        // Write down the first decoded magnitude
        value = entry->value0;
        rowptr[0] = SATURATE(value);

        // Write down the second decoded magnitude
        value = entry->value1;
        rowptr[1] = SATURATE(value);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->post_skip];
    }
}
#endif
// Decode the highpass band coefficients but do not write them out - used in SIF mode
//
// Runs the finite state machine over the band's bitstream exactly as the
// real decoders do (two 4-bit chunks per byte) but discards every decoded
// run and magnitude.  The image/width/height arguments are unused; they are
// retained so the signature matches the other band decoders.  Returns true
// once the band end trailer has been decoded.
bool SkipBandFSM(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
    int word;
    int chunk[2];
    FSMENTRY *state_entry;
    int n;

    pitch /= sizeof(PIXEL8S);

    // The Huffman decoder assumes each byte is processed as two 4-bit chunks
    assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
    assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

    // Step the state machine through the band without writing any output
    for (;;)
    {
        // Fetch the next byte and split it into its two 4-bit chunks
        word = GetFastByte(stream);
        chunk[0] = word >> FSM_INDEX_SIZE;
        chunk[1] = word & ((1<<FSM_INDEX_SIZE)-1);

        // Process the high chunk, then the low chunk
        for (n = 0; n < 2; n++)
        {
            // Look up the table entry for this chunk in the current state
            state_entry = GetFSMTableEntry(fsm, chunk[n]);

            // Stop once the band end trailer has been decoded
            if (state_entry->value0 == BAND_END_TRAILER) {
                ResetFSM(fsm);
                return true;
            }

            // Advance to the next state
            UpdateFSM(fsm, (int)state_entry->next_state);
        }
    }
}
#if _TIMING
extern TIMER tk_fastruns;
#endif
#if 0 //dan20041030 not used
// New version of coefficient runs decoder that uses a finite state machine with a scaling factor
// NOTE: this function is compiled out (#if 0 above) — kept for reference.
//
// Wrapper that validates the decoder state and wavelet, then decodes one
// 8-bit band with DecodeBandFSM8sNoGap.  Returns false (and sets
// decoder->error) on a validation or decode failure, true otherwise.
bool DecodeFastRunsFSM8s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
                         int band_index, int width, int height)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
    FILE *logfile = decoder->logfile;
    int result;

    // Get the pointer to the finite state machine
    FSM *fsm = &decoder->fsm[decoder->codec.active_codebook]; //DAN20041026

    // All rows are treated as one long row that covers the entire band
    int size = fsm->table.num_states;
    PIXEL *rowptr;
    int row = 0;
    int pitch;
    int pixel_type = wavelet->pixel_type[band_index];

    decoder->codec.active_codebook = 0; // reset CODEC state

    // Must have a valid wavelet
    assert(wavelet != NULL);
    if (wavelet == NULL) return false;

    //Must have a valid FSM
    assert(fsm != NULL);
    if(fsm == NULL) return false;

    assert(size > 0);
    if (size == 0) {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }

    // Check if the band is intended for 8-bit pixels
    assert(pixel_type == PIXEL_TYPE_8S);

    START(tk_fastruns);

    rowptr = (PIXEL *)wavelet->band[band_index];
    pitch = wavelet->pitch8s; // Use the 8-bit pitch
    //pitch = wavelet->pitch;

    // The finite state machine does not support a marker at the end of rows
#if RUNS_ROWEND_MARKER
    assert(0);
#endif

    // Get one byte from the bitstream and decode 4 bits at a time
    result = DecodeBandFSM8sNoGap(fsm, stream, (PIXEL8S *)rowptr, width, height, pitch);
    assert(result == true);
    if (result != true) {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }

#if (0 && DEBUG && _WIN32)
    _CrtCheckMemory();
#endif

#if (0 && DEBUG)
    if (logfile)
        DumpBand("Band", wavelet, band_index, NULL, logfile);
#endif

#if (0 && DEBUG)
    if (logfile) {
        fprintf(logfile, "DecodeFastRunsFSM8s, band index: %d\n", band_index);
        DumpWaveletRow(wavelet, band_index, 0, logfile);
    }
#endif

// Label retained for the STOP macro below (no goto currently targets it)
end:
    STOP(tk_fastruns);
    return true;
}
#endif
#if _DEQUANTIZE_IN_FSM
// Restore every FSM table entry's magnitude values (value0/value1) from the
// snapshot saved in fsm->restoreFSM by DeQuantFSM, undoing a previous
// dequantization scaling.  The quant argument is unused; it is retained for
// interface compatibility with DeQuantFSM.
void ReQuantFSM(FSM *fsm, int quant)
{
    int count = 0;
    int i, j;
    short *restore = &fsm->restoreFSM[0];

#if !_INDIVIDUAL_ENTRY
    // One table of (1 << FSM_INDEX_SIZE) entries per state
    for (i = 0; i < fsm->table.num_states; i++)
    {
        FSMENTRY *entry = fsm->table.entries[i];
        for (j = 0; j < (1 << FSM_INDEX_SIZE); j++)
        {
            entry[j].value0 = restore[count++];
            entry[j].value1 = restore[count++];
        }
    }
#else
    // Individually allocated entries; NULL slots were never saved, so the
    // restore index only advances for non-NULL entries (mirrors DeQuantFSM)
    for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++)
    {
        // Bug fix: was "fsm_table.entries_ind[i]", which does not compile
        // in _INDIVIDUAL_ENTRY builds (cf. the same loop in DeQuantFSM)
        FSMENTRY *entry = fsm->table.entries_ind[i];
        if(entry)
        {
            entry->value0 = restore[count++];
            entry->value1 = restore[count++];
        }
    }
#endif
}
// Pre-scale every FSM table entry's magnitude values by the band's
// quantization factor so that dequantization happens inside the FSM lookup
// instead of per decoded coefficient.  The first call snapshots the original
// values into fsm->restoreFSM; later calls with a different quant first
// restore the originals (via ReQuantFSM) and then rescale.  A call with the
// same quant as last time is a no-op.
void DeQuantFSM(FSM *fsm, int quant)
{
    int i, j;

    // Undo any previous scaling before applying a new factor
    if(fsm->LastQuant > 1 && fsm->LastQuant != quant)
    {
        ReQuantFSM(fsm, fsm->LastQuant);
    }
    else if(fsm->LastQuant == quant)
    {
        // Table is already scaled by this factor
        return;
    }

    // Save the original (unscaled) values the first time through
    if(fsm->InitizedRestore == 0)
    {
        short *restore = &fsm->restoreFSM[0];
        int count = 0;

#if !_INDIVIDUAL_ENTRY
        for (i = 0; i < fsm->table.num_states; i++)
        {
            FSMENTRY *entry = fsm->table.entries[i];
            for (j = 0; j < (1 << FSM_INDEX_SIZE); j++)
            {
                restore[count++] = entry[j].value0;
                restore[count++] = entry[j].value1;
            }
        }
#else
        for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++)
        {
            FSMENTRY *entry = fsm->table.entries_ind[i];
            if(entry)
            {
                restore[count++] = entry->value0;
                restore[count++] = entry->value1;
            }
        }
#endif
        fsm->InitizedRestore = 1;
    }

    // Scale the magnitudes, leaving control codes (>= 0x7ff0, such as the
    // band end trailer) untouched in value0
#if !_INDIVIDUAL_ENTRY
    for (i = 0; i < fsm->table.num_states; i++)
    {
        FSMENTRY *entry = fsm->table.entries[i];
        for (j = 0; j < (1 << FSM_INDEX_SIZE); j++)
        {
            if(entry[j].value0 < 0x7ff0) // band end trailer
                entry[j].value0 *= quant;
            entry[j].value1 *= quant;
        }
    }
#else
    for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++)
    {
        FSMENTRY *entry = fsm->table.entries_ind[i];
        if(entry)
        {
            if(entry->value0 < 0x7ff0) // band end trailer etc
                entry->value0 *= quant;
            entry->value1 *= quant;
        }
    }
#endif

    fsm->LastQuant = quant;
}
#endif // _DEQUANTIZE_IN_FSM
// New version of coefficient runs decoder that uses a finite state machine with a scaling factor
//dan 7-11-03
//
// Decode one 16-bit highpass band.  When threading is enabled and the worker
// pool has more than one thread, the decode is queued to a worker thread and
// this routine advances the bitstream past the band (using the optional
// subband-size tag when present, otherwise scanning for the band-end marker
// 0x00003800); otherwise the band is decoded inline, with peak-escape
// handling and optional horizontal difference decoding.  Returns false (and
// sets decoder->error) on failure, true otherwise.
bool DecodeFastRunsFSM16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
                          int band_index, int width, int height, int threading)
{
    //CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    int result = true;
    int quant = wavelet->quantization[band_index];
    int active_codebook = decoder->codec.active_codebook;

    // Get the pointer to the finite state machine
    FSM *fsm = &decoder->fsm[active_codebook];

    int size;
    PIXEL *rowptr;
    //int row = 0;
    int pitch;
    CODEC_STATE *codec = &decoder->codec;
    //int channel = codec->channel;
    //int subband = codec->band.subband;
    //int num_subbands = codec->num_subbands;
    //int pixel_type = wavelet->pixel_type[band_index];
    int difference_coding = decoder->codec.difference_coding;
    //int localquant = 1;
    int peaklevel = 0;
    //int peaksize = 0;
    PIXEL *peakbase = NULL;

#if (0 && DEBUG)
    if (logfile) {
        fprintf(logfile, "Subband: %d, active_codebook: %d, difference_coding: %d\n",
                subband, decoder->codec.active_codebook, difference_coding);
    }
#endif

    // These codec fields apply only to the current subband, so clear them
    // now that their values have been captured above
    decoder->codec.active_codebook = 0; // reset CODEC state
    decoder->codec.difference_coding = 0; //reset state for next subband

    // Must have a valid wavelet
    assert(wavelet != NULL);
    if (wavelet == NULL) return false;

    //Must have a valid FSM
    assert(fsm != NULL);
    if(fsm == NULL) return false;

    // All rows are treated as one long row that covers the entire band
    size = fsm->table.num_states;
    assert(size > 0);
    if (size == 0) {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }

    // Check if the band is intended for 16-bit pixels
    assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_16S);

    START(tk_fastruns);

    rowptr = (PIXEL *)wavelet->band[band_index];
    //pitch = wavelet->pitch8s; // Use the 8-bit pitch
    pitch = wavelet->pitch;

    peaklevel = codec->peak_table.level;
    peakbase = codec->peak_table.base;

#if _THREADED
    // Only hand the band to a worker thread if the pool has more than one thread
    threading = decoder->entropy_worker_new.pool.thread_count > 1 ? threading : 0;
    if(threading)
    {
        decoder->entropy_worker_new.threads_used = 1;
        {
            //int start = stream->nWordsUsed;
            int end;
            struct entropy_data_new *data;
            int next_queue_num = decoder->entropy_worker_new.next_queue_num++;

            // Queue a copy of the bitstream and the decode parameters for the worker
            data = &decoder->entropy_worker_new.entropy_data[next_queue_num];
            memcpy(&data->stream,stream, sizeof(BITSTREAM));
            data->rowptr = rowptr;
            data->width = width;
            data->height = height;
            data->pitch = pitch;
            data->peaks = peakbase;
            data->level = peaklevel;
            data->quant = quant;
            data->wavelet = wavelet;
            data->band_index = band_index;
            data->active_codebook = active_codebook;
            data->difference_coding = difference_coding;

            // Start only a particular threadid
            if(next_queue_num == 0)
            {
                ThreadPoolSetWorkCount(&decoder->entropy_worker_new.pool, 1);
#if _DELAYED_THREAD_START==0
                ThreadPoolSendMessage(&decoder->entropy_worker_new.pool, THREAD_MESSAGE_START);
#endif
            }
            else
            { // Set the work count to the number of rows to process
                ThreadPoolAddWorkCount(&decoder->entropy_worker_new.pool, 1);
            }

            // Advance this thread's bitstream past the band so the caller can
            // continue with the next subband while the worker decodes this one
            {
                // Peek at the tag preceding the band payload; if it is the
                // optional subband-size tag, the chunk size gives the band
                // length directly
                unsigned short tag = *(stream->lpCurrentWord-8) << 8;
                if(tag == (unsigned short)OPTIONALTAG(CODEC_TAG_SUBBAND_SIZE))
                {
                    int chunksize;
                    int value = *(stream->lpCurrentWord-6) << 8;
                    value |= *(stream->lpCurrentWord-5);
                    tag |= *(stream->lpCurrentWord-7);
                    tag = NEG(tag);
                    chunksize = value;
                    chunksize &= 0xffff;
                    chunksize += ((tag&0xff)<<16);
                    chunksize *= 4;
                    chunksize -= 8;

                    // Verify the band-end marker is where the size says;
                    // otherwise fall back to the linear scan below
                    {
                        uint32_t *ptr = (uint32_t *)stream->lpCurrentWord;
                        ptr += (chunksize>>2);
                        if(*ptr != 0x00003800) // bandend
                        {
                            goto continuesearch;
                        }
                    }
                    stream->lpCurrentWord += chunksize;
                    stream->nWordsUsed -= chunksize;
                    end = stream->nWordsUsed;
                }
                else
                {
continuesearch:
                    // No size tag: scan 32-bit words for the band-end marker
                    while(*((uint32_t *)stream->lpCurrentWord) != 0x00003800) // bandend
                    {
                        stream->lpCurrentWord += 4;
                        stream->nWordsUsed -= 4;
                    }
                    end = stream->nWordsUsed;
                }
            }
        }
    }
    else
#endif // _THREADED
    {
        // Inline decode: fold the quantization factor into the FSM tables
        DeQuantFSM(fsm, quant);

        if (peaklevel)
        {
            result = DecodeBandFSM16sNoGapWithPeaks(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, peakbase, peaklevel, 1);
        }
        else
        {
#if _DEBUG
            result = DecodeBandFSM16sNoGap(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, logfile);
#else
            result = DecodeBandFSM16sNoGap(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch);
#endif
        }

        if(difference_coding)
        {
            // Horizontal prefix sum: each coefficient was encoded as the
            // difference from its left neighbor
            int x,y;
            PIXEL *line = rowptr;

            for(y=0;y<height;y++)
            {
                for(x=1;x<width;x++)
                {
                    line[x] += line[x-1];
                }
                line += pitch/2;
            }
        }

        if (result)
        {
            // Call thread safe routine to update the band valid flags
            UpdateWaveletBandValidFlags(decoder, wavelet, band_index);
        }
    }

    assert(result == true);
    if (result != true) {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }

//end:
    STOP(tk_fastruns);
    return true;
}
// Validate the decoder state and wavelet, then run the FSM over one 8-bit
// band without writing any output (via SkipBandFSM) — used when the band's
// coefficients are not needed, e.g. in SIF mode.  Returns false (and sets
// decoder->error) on a validation or decode failure, true otherwise.
bool SkipFastRunsFSM(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
                     int band_index, int width, int height)
{
    //CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    int result;

    // Get the pointer to the finite state machine
    FSM *fsm = &decoder->fsm[decoder->codec.active_codebook]; //DAN20041026

    // All rows are treated as one long row that covers the entire band
    int size = fsm->table.num_states;
    PIXEL *rowptr;
    //int row = 0;
    int pitch;
    //int pixel_type = wavelet->pixel_type[band_index];

    decoder->codec.active_codebook = 0; // reset CODEC state

    // Must have a valid wavelet
    assert(wavelet != NULL);
    if (wavelet == NULL) return false;

    //Must have a valid FSM
    assert(fsm != NULL);
    if(fsm == NULL) return false;

    assert(size > 0);
    if (size == 0) {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }

    // Check if the band is 8bit/pixel
    assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_8S);

    START(tk_fastruns);

    rowptr = (PIXEL *)wavelet->band[band_index];
    pitch = wavelet->pitch8s; // Use the 8-bit pitch

    // The finite state machine does not support a marker at the end of rows
#if RUNS_ROWEND_MARKER
    assert(0);
#endif

#if 1 // Get one byte from the bitstream and decode 4 bits at a time
    result = SkipBandFSM(fsm, stream, (PIXEL8S *)rowptr, width, height, pitch);
    assert(result == true);
    if (result != true) {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }
#endif

#if (0 && DEBUG && _WIN32)
    _CrtCheckMemory();
#endif

#if (0 && DEBUG)
    if (logfile)
        DumpBand("Band", wavelet, band_index, NULL, logfile);
#endif

//end:
    STOP(tk_fastruns);
    return true;
}
// The third version is also based on the finite state machine decoder with
// gaps between rows encoded as zero runs, but dequantization is performed as
// the highpass values are read from the bitstream and placed into a row buffer.
// The highpass values are not written into the wavelet highpass band.
// Eventually this routine will be merged into the routine DecodeTemporalBand8s
// since this routine contains code specific to the inverse temporal transform
// and DecodeTemporalBand8s has become a shell.
#if 0
bool DecodeBandRunsFSM8s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
int band_index, int width, int height,
IMAGE *frame0, IMAGE *frame1)
{
CODEC_ERROR error = CODEC_ERROR_OKAY;
FILE *logfile = decoder->logfile;
int result;
// Get the pointer to the finite state machine
FSM *fsm = &decoder->fsm;
// All rows are treated as one long row that covers the entire band
int size = fsm->table.num_states;
PIXEL *lowpass = wavelet->band[0];
int lowpass_pitch = wavelet->pitch;
//PIXEL8S *rowptr;
int row = 0;
int pitch;
int row_width; // Width of the encoded row of highpass coefficients
PIXEL *even = frame0->band[0];
PIXEL *odd = frame1->band[0];
int even_pitch = frame0->pitch;
int odd_pitch = frame1->pitch;
int pixel_type = wavelet->pixel_type[band_index];
int quantization = wavelet->quantization[band_index];
PIXEL *buffer;
size_t buffer_size;
int index, byte;
FSMENTRY *entry;
int column = 0;
int32_t value;
int buffer_row_size;
PIXEL *highpass;
// Check that the wavelet into which the band will be decoded is valid
assert(wavelet != NULL);
if (wavelet == NULL) return false;
// Check that the finite state machine is valid
assert(fsm != NULL);
if (fsm == NULL) return false;
assert(size > 0);
if (size == 0) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
// Check that the band was encoded using 8-bit signed coefficients
assert(pixel_type == PIXEL_TYPE_8S);
pitch = wavelet->pitch8s; // Use the pitch for 8-bit packed rows
// Get the buffer for storing one row of dequantized highpass coefficients
buffer = (PIXEL *)decoder->buffer;
buffer_size = decoder->buffer_size;
// The finite state machine does not support a marker at the end of each row
assert(RUNS_ROWEND_MARKER == 0);
/***** Start of code included from DecodeBandFSM8s() *****/
// Check that one byte can be processes as two 4-bit nibbles
assert(BITSTREAM_WORD_SIZE == (2 * FSM_INDEX_SIZE));
// Check that the bitstream buffer is empty
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
// Convert the pitch to units of pixels
pitch /= sizeof(PIXEL8S);
buffer_row_size = pitch * sizeof(PIXEL);
lowpass_pitch /= sizeof(PIXEL);
even_pitch /= sizeof(PIXEL);
odd_pitch /= sizeof(PIXEL);
// Compute the address of the row after the last row in the band
//maxptr = rowptr + height * pitch;
// Round up the row length (in bytes) to a multiple of 16 bytes
//row_size = ALIGN16(row_size);
// Check that the buffer is large enough to hold one row
//assert(buffer_size >= row_size);
assert(buffer_size >= buffer_row_size);
// Use the buffer for the row or highpass coefficients
highpass = buffer;
#if 1
// The row spans the allocated width (pitch) of the band in no gap mode
row_width = pitch;
#else
// For debugging
row_width = wavelet->encoded_pitch/sizeof(PIXEL8S);
#endif
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
// Decode zero runs and magnitude values (with appended sign bit)
// until the marker for the band end trailer has been decoded
for (;;)
{
// Read a byte from the bitstream
byte = GetFastByte(stream);
/***** Decode the first 4-bit nibble *****/
// Decode the first 4-bit nibble
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return when the entire band is decoded
if (entry->value0 == BAND_END_TRAILER)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Process the rest of the subband
ZeroHighPassRow(highpass, buffer_row_size);
while (++row < height)
{
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
}
ResetFSM(fsm);
return true;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// If no magnitude value is decoded
if (entry->value0 == 0)
{
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= row_width)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Compute the starting column for the next row
column -= row_width;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
}
}
// If there is only one decoded magnitude value
else if (entry->value1 == 0)
{
value = entry->value0;
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= row_width)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Compute the starting column for the next row
column -= row_width;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
}
// Fill in the decoded magnitude
// Check the column before storing the value
assert(0 <= column && column < row_width);
// Dequantize the value and store it in the highpass row buffer
highpass[column] = quantization * value;
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= row_width)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Compute the starting column for the next row
column -= row_width;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
}
}
// If there are two decoded magnitude values
else
{
// Check the column before storing values
assert(0 <= column && column < row_width);
if (column < (row_width - 1)) {
// Store both values in the current row
highpass[column++] = quantization * entry->value0;
highpass[column++] = quantization * entry->value1;
}
else {
value = entry->value0;
highpass[column] = quantization * value;
value = entry->value1;
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
column = 0;
highpass[column++] = quantization * value;
}
}
/***** Decode the second 4-bit nibble *****/
// Decode the second 4-bit nibble
index = byte & FSM_INDEX_MASK;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Process the rest of the subband
ZeroHighPassRow(highpass, buffer_row_size);
while (++row < height)
{
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
}
ResetFSM(fsm);
return true;
}
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// If no magnitude value is decoded
if (entry->value0 == 0)
{
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= row_width)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Compute the starting column for the next row
column -= row_width;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
}
}
// If there is only one decoded magnitude value
else if (entry->value1 == 0)
{
value = entry->value0;
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= row_width)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Compute the starting column for the next row
column -= row_width;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
}
// Fill in the decoded magnitude
// Check the column before storing the value
//assert(index < width);
assert(0 <= column && column < row_width);
highpass[column] = quantization * value;
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= row_width)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Compute the starting column for the next row
column -= row_width;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
}
}
// If there are two decoded magnitude values
else
{
// Check the column before storing values
assert(0 <= column && column < row_width);
if (column < (row_width - 1)) {
// Store both highpass values in the current row
highpass[column++] = quantization * entry->value0;
highpass[column++] = quantization * entry->value1;
}
else {
highpass[column] = quantization * entry->value0;
value = entry->value1;
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
column = 0;
highpass[column++] = quantization * value;
}
}
}
/***** End of the code included from DecodeBandFSM8s() *****/
#if 0
assert(result == true);
if (result != true) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
#endif
#if (0 && DEBUG && _WIN32)
_CrtCheckMemory();
#endif
#if (0 && DEBUG)
if (logfile)
DumpBand("Band", wavelet, band_index, NULL, logfile);
#endif
#if 0
end:
return true;
#endif
}
#endif
/***** End of the code for the finite state machine decoder *****/
#if 1
// The second version applies the horizontal inverse filters row by row, so the
// memory access pattern is more efficient. The lowpass and highpass temporal
// coefficients for each row are inverted and packed into the output in one pass.
// Apply the inverse horizontal-temporal transform and pack the output into a buffer
// Apply the inverse horizontal-temporal transform to one frame wavelet per channel
// and pack the reconstructed rows into an interleaved YUV (YUYV or UYVY) frame.
//
// The horizontal inverse filter is applied row by row so the memory access pattern
// is efficient: for each pass through the loop, one lowpass and one highpass row of
// temporal coefficients are reconstructed per channel, then the inverse temporal
// transform packs both output fields (even and odd rows) in a single pass.
//
// transform      per-channel transform descriptors holding the frame wavelets
// frame_index    index of the wavelet (frame) within each transform
// num_channels   number of color channels (must not exceed TRANSFORM_MAX_CHANNELS)
// output         packed output frame; consecutive passes write two rows (one field pitch) apart
// output_pitch   output pitch in bytes
// frame          decoded frame dimensions and output pixel format
// scratch        scratch space used for the intermediate rows of temporal coefficients
// chroma_offset  offset applied to the chroma values during packing
// precision      CODEC_PRECISION_8BIT or CODEC_PRECISION_10BIT
void TransformInverseFrameToYUV(TRANSFORM *transform[], int frame_index, int num_channels,
                                uint8_t *output, int output_pitch, FRAME_INFO *frame,
                                const SCRATCH *scratch, int chroma_offset, int precision)
{
    // Pointers to the rows in the horizontal wavelet for each channel
    PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

    // Horizontal wavelet band width and pitch
    int horizontal_width[TRANSFORM_MAX_CHANNELS];
    int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
    int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS];

    // Quantization factors
    int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
    int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
    int highlow_quantization[TRANSFORM_MAX_CHANNELS];
    int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

    // Pointers to the rows in the temporal wavelet for each channel
    PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
    PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];

    // Push the scratch space state to allocate a new section
    char *buffer = scratch->free_ptr;
#if DEBUG
    size_t buffer_size = scratch->free_size;
#endif

    // Dimensions of the reconstructed frame
    int frame_width = frame->width;
    int frame_height = frame->height;
    int half_height = frame_height / 2;
    size_t temporal_row_size = frame_width * sizeof(PIXEL);
    int field_pitch = 2 * output_pitch;
    int output_width;
    int channel;
    int row;

    // Round up the temporal row size to an integral number of cache lines
    temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

    // Check that the buffer starts on a cache line boundary
    assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

    // Check that the number of channels is reasonable
    assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

    // Check that the buffer is large enough
    // NOTE(review): the line buffer used below lies at offset
    // (2 * num_channels + 2) * temporal_row_size, beyond the extent asserted
    // here — the caller presumably provides extra headroom; verify.
#if DEBUG
    assert((2 * num_channels * temporal_row_size) <= buffer_size);
#endif

    // Allocate buffers for a single row of lowpass and highpass temporal coefficients
    // and initialize the arrays of row pointers into the horizontal transform bands
    for (channel = 0; channel < num_channels; channel++)
    {
        IMAGE *wavelet = transform[channel]->wavelet[frame_index];
#if (0 && DEBUG)
        int static count = 0;
        if (count < 20) {
            char label[_MAX_PATH];
            int i;
            sprintf(label, "Frame%d-%d-", frame_index, count);
            DumpPGM(label, wavelet, NULL);
            for (i = 1; i < wavelet->num_bands; i++)
            {
                sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
                DumpBandPGM(label, wavelet, i, NULL);
            }
        }
        count++;
#endif
        // Initialize the row pointers into the horizontal bands
        horizontal_lowlow[channel] = wavelet->band[LL_BAND];
        horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
        horizontal_highlow[channel] = wavelet->band[HL_BAND];
        horizontal_highhigh[channel] = wavelet->band[HH_BAND];

        lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
        lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
        highlow_quantization[channel] = wavelet->quantization[HL_BAND];
        highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

        // Compute the pitch in units of pixels
        horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

        // Compute the 8-bit pitch in units of pixels
        horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL);
        //horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL8S);

        // Remember the width of the horizontal wavelet rows for this channel
        horizontal_width[channel] = wavelet->width;

        //TODO: Need to recode the buffer allocations using the scratch space API

        // Divide the buffer into temporal lowpass and highpass rows
        temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
        temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
    }

    // Process one row at a time from each channel
    for (row = 0; row < half_height; row++)
    {
        // Intermediate line buffer for the inverse horizontal transform
        PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);

        // Invert the horizontal transform applied to the temporal bands in each channel
        for (channel = 0; channel < num_channels; channel++)
        {
            int pitch = horizontal_pitch[channel];
            //int pitch8s = horizontal_pitch8s[channel];

            // Invert the horizontal transform applied to the temporal lowpass row
            InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
                                                  (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
                                                  temporal_lowpass[channel],
                                                  horizontal_width[channel],
                                                  (PIXEL *)line_buffer);

            // Invert the horizontal transform applied to the temporal highpass row
            //DAN20051004 -- possible reversiblity issue
            //InvertHorizontalRow8sBuffered //----------------------- Maybe bad
            InvertHorizontalRow16s8sTo16sBuffered(horizontal_highlow[channel], highlow_quantization[channel],
                                                  (PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
                                                  temporal_highpass[channel],
                                                  horizontal_width[channel],
                                                  (PIXEL *)line_buffer);

            // Advance to the next row in each horizontal band in this channel
            horizontal_lowlow[channel] += pitch;
            horizontal_lowhigh[channel] += pitch;
            horizontal_highlow[channel] += pitch;
            horizontal_highhigh[channel] += pitch;
        }

        // The output width is twice the width of the wavelet bands
        output_width = 2 * horizontal_width[0];

        // Adjust the frame width to fill to the end of each row
        //frame_width = output_pitch / 2;

        if (precision == CODEC_PRECISION_10BIT)
        {
            // Invert the temporal bands from all channels and pack output pixels
            switch (frame->format)
            {
            // Need to reduce the resolution from 10 bits to 8 bits during the inverse
            case DECODED_FORMAT_YUYV:
                InvertInterlacedRow16s10bitToYUV(temporal_lowpass, temporal_highpass, num_channels,
                                                 output, output_pitch, output_width, frame_width,
                                                 chroma_offset);
                break;

            case DECODED_FORMAT_UYVY:
                InvertInterlacedRow16s10bitToUYVY(temporal_lowpass, temporal_highpass, num_channels,
                                                  output, output_pitch, output_width, frame_width,
                                                  chroma_offset);
                break;

            default:
                assert(0);
                break;
            }
        }
        else // Older code for 8-bit precision
        {
            int format;

            assert(precision == CODEC_PRECISION_8BIT);

            switch (frame->format)
            {
            case DECODED_FORMAT_YUYV:
                format = COLOR_FORMAT_YUYV;
                break;

            case DECODED_FORMAT_UYVY:
                format = COLOR_FORMAT_UYVY;
                break;

            default:
                // BUGFIX: the switch previously had no default case, so any other
                // decoded format left the variable uninitialized (undefined behavior).
                // Assert in debug builds and fall back to a defined packing order.
                assert(0);
                format = COLOR_FORMAT_YUYV;
                break;
            }

            // Invert the temporal bands from all channels and pack output pixels
            InvertInterlacedRow16sToYUV(temporal_lowpass, temporal_highpass, num_channels,
                                        output, output_pitch, output_width, frame_width,
                                        chroma_offset, format);
        }

        // Advance to the next row in the packed output image
        output += field_pitch;
    }
}
#endif
#if _INTERLACED_WORKER_THREADS
// Worker-thread variant of TransformInverseFrameToYUV: each worker thread pulls row
// indices from a shared counter (guarded by a semaphore and critical section) and
// inverts the horizontal-temporal transform for those rows, packing interleaved YUV
// output. The loop exits when the row semaphore has no more rows to hand out.
//
// decoder        decoder state providing the transforms, scratch space, and the
//                interlaced worker synchronization objects
// thread_index   index of this worker thread (used to partition the scratch buffer)
// frame_index    index of the wavelet (frame) within each transform
// num_channels   number of color channels (must not exceed TRANSFORM_MAX_CHANNELS)
// output         packed output frame; each row index maps to output + row * 2 * pitch
// output_pitch   output pitch in bytes
// frame          decoded frame dimensions and output pixel format
// chroma_offset  offset applied to the chroma values during packing
// precision      CODEC_PRECISION_8BIT or CODEC_PRECISION_10BIT
void TransformInverseFrameSectionToYUV(DECODER *decoder, int thread_index, int frame_index, int num_channels,
                                       uint8_t *output, int output_pitch, FRAME_INFO *frame,
                                       int chroma_offset, int precision)
{
    FILE *logfile = decoder->logfile;
    TRANSFORM **transform = decoder->transform;
    const SCRATCH *scratch = &decoder->scratch;

    // Pointers to the rows in the horizontal wavelet for each channel
    PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

    // Horizontal wavelet band width and pitch
    int horizontal_width[TRANSFORM_MAX_CHANNELS];
    int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
    int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS];

    // Quantization factors
    int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
    int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
    int highlow_quantization[TRANSFORM_MAX_CHANNELS];
    int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

    // Pointers to the rows in the temporal wavelet for each channel
    PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
    PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];

    // Push the scratch space state to allocate a new section
    char *buffer = scratch->free_ptr;
    size_t buffer_size = scratch->free_size;

    uint8_t *output_row_ptr = output;

    // Dimensions of the reconstructed frame
    int frame_width = frame->width;
    int frame_height = frame->height;
    int half_height = frame_height / 2;
    size_t temporal_row_size = frame_width * sizeof(PIXEL);
    int field_pitch = 2 * output_pitch;
    int output_width;
    int channel;
    int row;

    HANDLE row_semaphore = decoder->interlaced_worker.row_semaphore;
    int return_value;

    // Round up the temporal row size to an integral number of cache lines
    temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

    // Divide the buffer space between the four threads
    buffer_size /= 4;
    buffer += buffer_size * thread_index;

    // Check that the buffer starts on a cache line boundary
    assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

    // Check that the number of channels is reasonable
    assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

    // Check that the buffer is large enough
    assert((2 * num_channels * temporal_row_size) <= buffer_size);

    // Allocate buffers for a single row of lowpass and highpass temporal coefficients
    // and initialize the arrays of row pointers into the horizontal transform bands
    for (channel = 0; channel < num_channels; channel++)
    {
        IMAGE *wavelet = transform[channel]->wavelet[frame_index];
#if (0 && DEBUG)
        int static count = 0;
        if (count < 20) {
            char label[_MAX_PATH];
            int i;
            sprintf(label, "Frame%d-%d-", frame_index, count);
            DumpPGM(label, wavelet, NULL);
            for (i = 1; i < wavelet->num_bands; i++)
            {
                sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
                DumpBandPGM(label, wavelet, i, NULL);
            }
        }
        count++;
#endif
        // Initialize the row pointers into the horizontal bands
        horizontal_lowlow[channel] = wavelet->band[LL_BAND];
        horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
        horizontal_highlow[channel] = wavelet->band[HL_BAND];
        horizontal_highhigh[channel] = wavelet->band[HH_BAND];

        lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
        lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
        highlow_quantization[channel] = wavelet->quantization[HL_BAND];
        highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

        // Compute the pitch in units of pixels
        horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

        // Compute the 8-bit pitch in units of pixels
        horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL);
        //horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL8S);

        // Remember the width of the horizontal wavelet rows for this channel
        horizontal_width[channel] = wavelet->width;

        //TODO: Need to recode the buffer allocations using the scratch space API

        // Divide the buffer into temporal lowpass and highpass rows
        temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
        temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
    }

#if (0 && DEBUG)
    // BUGFIX: the old format string printed a pointer with %d and passed the
    // pointer twice; use a single %p conversion instead.
    if (logfile) {
        fprintf(logfile, "Output buffer: 0x%p\n", output);
    }
#endif

/*	if (thread_index == 0)
    {
        row = 0;
        row_step = 1;
    }
    else if (thread_index == 1)
    {
        row = half_height - 1;
        row_step = -1;

        // Move to the bottom of the transform and process moving up
        for (channel = 0; channel < num_channels; channel++)
        {
            int offset = horizontal_pitch[channel] * (half_height - 1);

            horizontal_lowlow[channel] += offset;
            horizontal_lowhigh[channel] += offset;
            horizontal_highlow[channel] += offset;
            horizontal_highhigh[channel] += offset;

            horizontal_pitch[channel] = NEG(horizontal_pitch[channel]);
            horizontal_pitch8s[channel] = NEG(horizontal_pitch8s[channel]);
        }

        output += field_pitch * (half_height - 1);
        field_pitch = NEG(field_pitch);
    }
    else
    {
        assert(0);	// what about middle threads?
    }

#if (0 && DEBUG)
    if (logfile) {
        fprintf(logfile, "Thread index: %d, start row: %d, row step: %d, field_pitch: %d\n",
                thread_index, row, row_step, field_pitch);
    }
#endif
*/
    // Loop until all of the rows have been processed
    for (;;)
    {
        // Wait for one row from each channel to invert the transform
        return_value = WaitForSingleObject(row_semaphore, 0);

        // Determine the index of this worker thread
        if (return_value == WAIT_OBJECT_0)
        {
            // Atomically claim the next unprocessed row index
            if(decoder->interlaced_worker.lock_init)
            {
                EnterCriticalSection(&decoder->interlaced_worker.lock);
            }
            row = decoder->interlaced_worker.current_row++;
            if(decoder->interlaced_worker.lock_init)
                LeaveCriticalSection(&decoder->interlaced_worker.lock);

            // Locate the output row and the wavelet band rows for this row index
            output_row_ptr = output;
            output_row_ptr += row * 2 * output_pitch;

            for (channel = 0; channel < num_channels; channel++)
            {
                int pitch = horizontal_pitch[channel];
                IMAGE *wavelet = transform[channel]->wavelet[frame_index];
                horizontal_lowlow[channel] = wavelet->band[LL_BAND];
                horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
                horizontal_highlow[channel] = wavelet->band[HL_BAND];
                horizontal_highhigh[channel] = wavelet->band[HH_BAND];

                horizontal_lowlow[channel] += pitch*row;
                horizontal_lowhigh[channel] += pitch*row;
                horizontal_highlow[channel] += pitch*row;
                horizontal_highhigh[channel] += pitch*row;
            }
        }

        if (return_value == WAIT_OBJECT_0 && 0 <= row && row < half_height)
        {
            //PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);
            PIXEL *line_buffer = (PIXEL *)(buffer + 2 * num_channels * temporal_row_size);

            // assert(0 <= row && row < half_height);
#if (0 && DEBUG)
            // BUGFIX: the old format string had four conversions but only three
            // arguments and printed a pointer with %d; print the pointer once.
            if (logfile) {
                fprintf(logfile, "Processing row: %d, thread index: %d, output: 0x%p\n",
                        row, thread_index, output_row_ptr);
            }
#endif
            // Invert the horizontal transform applied to the temporal bands in each channel
            for (channel = 0; channel < num_channels; channel++)
            {
                int pitch = horizontal_pitch[channel];
                //int pitch8s = horizontal_pitch8s[channel];

#if (0 && DEBUG)
                // Invert the horizontal transform by duplicating the lowpass pixels
                InvertHorizontalRowDuplicated16s(horizontal_lowlow[channel], lowlow_quantization[channel],
                                                 (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
                                                 temporal_lowpass[channel], horizontal_width[channel],
                                                 (PIXEL *)line_buffer);
#else
                // Invert the horizontal transform applied to the temporal lowpass row
                InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
                                                      (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
                                                      temporal_lowpass[channel],
                                                      horizontal_width[channel],
                                                      (PIXEL *)line_buffer);
#endif
                // Invert the horizontal transform applied to the temporal highpass row
                InvertHorizontalRow8sBuffered((PIXEL8S *)horizontal_highlow[channel], highlow_quantization[channel],
                                              (PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
                                              temporal_highpass[channel],
                                              horizontal_width[channel],
                                              (PIXEL *)line_buffer);

                // Advance to the next row in each horizontal band in this channel
                //horizontal_lowlow[channel] += pitch;
                //horizontal_lowhigh[channel] += pitch;
                //horizontal_highlow[channel] += pitch;
                //horizontal_highhigh[channel] += pitch;
            }

            // The output width is twice the width of the wavelet bands
            output_width = 2 * horizontal_width[0];

            // Adjust the frame width to fill to the end of each row
            //frame_width = output_pitch / 2;

            if (precision == CODEC_PRECISION_10BIT)
            {
                // Invert the temporal bands from all channels and pack output pixels
                switch (frame->format)
                {
                // Need to reduce the resolution from 10 bits to 8 bits during the inverse
                case DECODED_FORMAT_YUYV:
                    InvertInterlacedRow16s10bitToYUV(temporal_lowpass, temporal_highpass, num_channels,
                                                     output_row_ptr, output_pitch, output_width, frame_width,
                                                     chroma_offset);
                    break;

                case DECODED_FORMAT_UYVY:
                    InvertInterlacedRow16s10bitToUYVY(temporal_lowpass, temporal_highpass, num_channels,
                                                      output_row_ptr, output_pitch, output_width, frame_width,
                                                      chroma_offset);
                    break;

                default:
                    assert(0);
                    break;
                }
            }
            else // Older code for 8-bit precision
            {
                int format;

                assert(precision == CODEC_PRECISION_8BIT);

                switch (frame->format)
                {
                case DECODED_FORMAT_YUYV:
                    format = COLOR_FORMAT_YUYV;
                    break;

                case DECODED_FORMAT_UYVY:
                    format = COLOR_FORMAT_UYVY;
                    break;

                default:
                    // BUGFIX: the switch previously had no default case, so any other
                    // decoded format left the variable uninitialized (undefined behavior).
                    // Assert in debug builds and fall back to a defined packing order.
                    assert(0);
                    format = COLOR_FORMAT_YUYV;
                    break;
                }

                // Invert the temporal bands from all channels and pack output pixels
                InvertInterlacedRow16sToYUV(temporal_lowpass, temporal_highpass, num_channels,
                                            output_row_ptr, output_pitch, output_width, frame_width,
                                            chroma_offset, format);
            }

            // Advance to the next row in the input transforms
            //row += row_step;

            // Advance to the next row in the packed output image
            //output += field_pitch;
        }
        else
        {
            // No more rows to process
            break;
        }
    }

#if (0 && DEBUG)
    if (logfile) {
        fprintf(logfile, "Finished transform, thread index: %d\n", thread_index);
    }
#endif
}
#endif
//#if BUILD_PROSPECT
// Apply the inverse horizontal-temporal transform and output rows of luma and chroma
// Apply the inverse horizontal-temporal transform to one frame wavelet per channel
// and output planar rows of 16-bit luma and chroma (row16u layout: per output row,
// the luma row is followed by the chroma rows, each channel advancing output_row_ptr
// by its own row width).
//
// The two signatures differ only in how the scratch buffer is supplied; the active
// build uses the DECODER/SCRATCH form.
#if 0
void TransformInverseFrameToRow16u(TRANSFORM *transform[], int frame_index, int num_channels,
                                   PIXEL16U *output, int output_pitch, FRAME_INFO *frame,
                                   char *buffer, size_t buffer_size, int chroma_offset,
                                   int precision)
#else
void TransformInverseFrameToRow16u(DECODER *decoder, TRANSFORM *transform[], int frame_index, int num_channels,
                                   PIXEL16U *output, int output_pitch, FRAME_INFO *frame,
                                   const SCRATCH *scratch, int chroma_offset,
                                   int precision)
#endif
{
    // Pointers to the rows in the horizontal wavelet for each channel
    PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

    // Horizontal wavelet band width and pitch
    int horizontal_width[TRANSFORM_MAX_CHANNELS];
    int horizontal_pitch[TRANSFORM_MAX_CHANNELS];

    // Quantization factors
    // NOTE(review): these are initialized below but not read in this routine
    // (the InvertHorizontalRow16s calls do not take quantization arguments)
    int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
    int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
    int highlow_quantization[TRANSFORM_MAX_CHANNELS];
    int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

    // Push the scratch space state to allocate a new section
    char *buffer = scratch->free_ptr;
#if DEBUG
    size_t buffer_size = scratch->free_size;
#endif

    // Buffers for the rows in the temporal wavelet (reused for each channel)
    PIXEL *temporal_lowpass;
    PIXEL *temporal_highpass;

    // Per-channel output row width: full width for luma, half width for chroma
    int output_row_width[TRANSFORM_MAX_CHANNELS];

    // Dimensions of the reconstructed frame
    int frame_width = frame->width;
    int frame_height = frame->height;
    int half_height = frame_height / 2;
    size_t temporal_row_size = frame_width * sizeof(PIXEL);
    int field_pitch = 2 * output_pitch;
    int luma_width = frame_width;
    int chroma_width = luma_width/2;
    int channel;
    int row;

#if (1 && DEBUG_ROW16U)
    PIXEL16U *output_buffer;
#endif

    // This routine should only be called to decode rows of 16-bit luma and chroma
    //assert(frame->format == DECODED_FORMAT_YR16);

    // Round up the temporal row size to an integral number of cache lines
    temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

    // Check that the buffer starts on a cache line boundary
    assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

    // Check that the number of channels is reasonable
    assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

    // Buffer must be large enough for two rows of temporal coefficients (lowpass and highpass)
    // plus the buffer used by the inverse horizontal transform for its intermediate results
#if DEBUG
    assert((2 * temporal_row_size) <= buffer_size);
#endif

    // Allocate buffers for one row of lowpass and highpass temporal coefficients
    temporal_lowpass = (PIXEL *)&buffer[0];
    temporal_highpass = (PIXEL *)&buffer[temporal_row_size];

#if (1 && DEBUG_ROW16U)
    // Debug builds stage the output in scratch space instead of writing directly
    output_buffer = (PIXEL16U *)&buffer[2 * temporal_row_size];
#endif

    // Initialize the arrays of row pointers into the horizontal transform bands
    for (channel = 0; channel < num_channels; channel++)
    {
        IMAGE *wavelet = transform[channel]->wavelet[frame_index];
#if (0 && DEBUG)
        int static count = 0;
        if (count < 20) {
            char label[_MAX_PATH];
            int i;
            sprintf(label, "Frame%d-%d-", frame_index, count);
            DumpPGM(label, wavelet, NULL);
            for (i = 1; i < wavelet->num_bands; i++)
            {
                sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
                DumpBandPGM(label, wavelet, i, NULL);
            }
        }
        count++;
#endif
        // Initialize the row pointers into the horizontal bands
        horizontal_lowlow[channel] = wavelet->band[LL_BAND];
        horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
        horizontal_highlow[channel] = wavelet->band[HL_BAND];
        horizontal_highhigh[channel] = wavelet->band[HH_BAND];

        lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
        lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
        highlow_quantization[channel] = wavelet->quantization[HL_BAND];
        highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

        // Compute the pitch in units of pixels
        horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

        // Remember the width of the horizontal wavelet rows for this channel
        horizontal_width[channel] = wavelet->width;

        // Compute the width of each row of output pixels
        output_row_width[channel] = (channel == 0) ? luma_width : chroma_width;
    }

    // Process one row at a time from each channel
    for (row = 0; row < half_height; row++)
    {
#if (1 && DEBUG_ROW16U)
        PIXEL16U *output_row_ptr = output_buffer;
        PIXEL16U *planar_output[TRANSFORM_MAX_CHANNELS];
        int planar_pitch[TRANSFORM_MAX_CHANNELS];
        ROI strip = {luma_width, 2};
        uint8_t *yuv_output = (uint8_t *)output;
        uint8_t *output1 = yuv_output;
        uint8_t *output2 = yuv_output + output_pitch;
#else
        PIXEL16U *output_row_ptr = output;
#endif
        // Invert the horizontal transform applied to the temporal bands in each channel
        for (channel = 0; channel < num_channels; channel++)
        {
            int pitch = horizontal_pitch[channel];

            // Half-horizontal decodes skip the inverse filter and use the bands directly
            if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
            {
                // Invert the horizontal transform applied to the temporal lowpass row
                BypassHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
                                       temporal_lowpass, horizontal_width[channel]);

                // Invert the horizontal transform applied to the temporal highpass row
                BypassHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
                                       temporal_highpass, horizontal_width[channel]);
            }
            else
            {
                // Invert the horizontal transform applied to the temporal lowpass row
                InvertHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
                                       temporal_lowpass, horizontal_width[channel]);

                // Invert the horizontal transform applied to the temporal highpass row
                InvertHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
                                       temporal_highpass, horizontal_width[channel]);
            }

            //***DEBUG***
            //ZeroMemory(temporal_highpass, temporal_row_size);
            //FillPixelMemory(temporal_highpass, temporal_row_size/sizeof(PIXEL), 50);

            // Advance to the next row in each horizontal band in this channel
            horizontal_lowlow[channel] += pitch;
            horizontal_lowhigh[channel] += pitch;
            horizontal_highlow[channel] += pitch;
            horizontal_highhigh[channel] += pitch;

#if (1 && DEBUG_ROW16U)
            // Write the rows of 16-bit pixels to a temporary buffer
            planar_output[channel] = output_row_ptr;
            planar_pitch[channel] = output_pitch * sizeof(PIXEL);

            // Invert the temporal transform and output two rows of luma or chroma
            InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
                                           planar_output[channel], planar_pitch[channel],
                                           output_row_width[channel],
                                           frame_width, chroma_offset, precision);

            //if (channel > 0)
            if (0)
            {
                uint8_t *output3 = (uint8_t *)planar_output[channel];
                uint8_t *output4 = (uint8_t *)output3 + planar_pitch[channel];
                int output_size = output_row_width[channel] * sizeof(PIXEL);
                int fill_value = (128 << 8);
                //ZeroMemory(output3, output_size);
                //ZeroMemory(output4, output_size);
                FillPixelMemory((PIXEL *)output3, output_row_width[channel], fill_value);
                FillPixelMemory((PIXEL *)output4, output_row_width[channel], fill_value);
            }
#else
            // Invert the temporal transform and output two rows of luma or chroma
            InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
                                           output_row_ptr, output_pitch, output_row_width[channel],
                                           frame_width, chroma_offset, precision);
#endif
            // Advance the output row pointer to the next channel
            output_row_ptr += output_row_width[channel];

            // Check the output row alignment
            assert(ISALIGNED16(output_row_ptr));
        }

        // Advance to the next group of rows in the output image
        output += field_pitch/sizeof(PIXEL16U);
    }
}
//#endif
#if _INTERLACED_WORKER_THREADS
// Invert the horizontal-temporal transform for an interlaced frame and pack the
// output as rows of 16-bit luma and chroma.  This is the worker-thread variant:
// several threads share one frame by pulling row indices from a shared counter
// that is metered by a semaphore (one token per row) and guarded by a critical
// section, so each row is processed exactly once.
//
// decoder       decoder state (scratch space, transforms, worker bookkeeping)
// thread_index  index of this worker; used to partition the scratch buffer
// frame_index   which wavelet in each channel transform to invert
// num_channels  number of color channels to process
// output        base of the 16-bit output frame
// output_pitch  output row pitch (see NOTE at the pointer arithmetic below)
// frame         dimensions of the reconstructed frame
// chroma_offset chroma offset applied during reconstruction
// precision     encoded precision of the coefficients
void TransformInverseFrameSectionToRow16u(DECODER *decoder, int thread_index, int frame_index, int num_channels,
                                          PIXEL16U *output, int output_pitch, FRAME_INFO *frame,
                                          int chroma_offset, int precision)
{
    FILE *logfile = decoder->logfile;
    TRANSFORM **transform = decoder->transform;
    const SCRATCH *scratch = &decoder->scratch;

    // Pointers to the rows in the horizontal wavelet for each channel
    PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

    // Horizontal wavelet band width and pitch
    int horizontal_width[TRANSFORM_MAX_CHANNELS];
    int horizontal_pitch[TRANSFORM_MAX_CHANNELS];

    // Quantization factors (loaded below but not used in this routine's paths)
    int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
    int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
    int highlow_quantization[TRANSFORM_MAX_CHANNELS];
    int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

    // Push the scratch space state to allocate a new section
    char *buffer = scratch->free_ptr;
    size_t buffer_size = scratch->free_size;

    // Buffers for the rows in the temporal wavelet (reused for each channel)
    PIXEL *temporal_lowpass;
    PIXEL *temporal_highpass;

    int output_row_width[TRANSFORM_MAX_CHANNELS];

    // Dimensions of the reconstructed frame
    int frame_width = frame->width;
    int frame_height = frame->height;
    int half_height = frame_height / 2;
    size_t temporal_row_size = frame_width * sizeof(PIXEL);
    int field_pitch = 2 * output_pitch;
    int luma_width = frame_width;
    int chroma_width = luma_width/2;
    int channel;
    int row;
    HANDLE row_semaphore = decoder->interlaced_worker.row_semaphore;
    int return_value;

#if (1 && DEBUG_ROW16U)
    PIXEL16U *output_buffer;
#endif

    // This routine should only be called to decode rows of 16-bit luma and chroma
    //assert(frame->format == DECODED_FORMAT_YR16);

    // Round up the temporal row size to an integral number of cache lines
    temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

#if 0
    if (thread_index == 1)
    {
        // Skip over the buffer space used by the other thread
        size_t buffer_usage = 2 * temporal_row_size;
        buffer += buffer_usage;
        buffer_size -= buffer_usage;
    }
#else
    // Divide the buffer space between the two threads
    // NOTE(review): the free space is divided by four, not two -- presumably to
    // leave headroom beyond the two temporal rows per thread; confirm against
    // the scratch space layout used by the callers.
    buffer_size /= 4;
    buffer += buffer_size * thread_index;
#endif

    // Check that the buffer starts on a cache line boundary
    assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

    // Check that the number of channels is reasonable
    assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

    // Buffer must be large enough for two rows of temporal coefficients (lowpass and highpass)
    // plus the buffer used by the inverse horizontal transform for its intermediate results
    assert((2 * temporal_row_size) <= buffer_size);

    // Allocate buffers for one row of lowpass and highpass temporal coefficients
    temporal_lowpass = (PIXEL *)&buffer[0];
    temporal_highpass = (PIXEL *)&buffer[temporal_row_size];

#if (1 && DEBUG_ROW16U)
    output_buffer = (PIXEL16U *)&buffer[2 * temporal_row_size];
#endif

    // Initialize the arrays of row pointers into the horizontal transform bands
    for (channel = 0; channel < num_channels; channel++)
    {
        IMAGE *wavelet = transform[channel]->wavelet[frame_index];
#if (0 && DEBUG)
        int static count = 0;
        if (count < 20) {
            char label[_MAX_PATH];
            int i;
            sprintf(label, "Frame%d-%d-", frame_index, count);
            DumpPGM(label, wavelet, NULL);
            for (i = 1; i < wavelet->num_bands; i++)
            {
                sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
                DumpBandPGM(label, wavelet, i, NULL);
            }
        }
        count++;
#endif
        // Initialize the row pointers into the horizontal bands
        horizontal_lowlow[channel] = wavelet->band[LL_BAND];
        horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
        horizontal_highlow[channel] = wavelet->band[HL_BAND];
        horizontal_highhigh[channel] = wavelet->band[HH_BAND];

        lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
        lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
        highlow_quantization[channel] = wavelet->quantization[HL_BAND];
        highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

        // Compute the pitch in units of pixels
        horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

        // Remember the width of the horizontal wavelet rows for this channel
        horizontal_width[channel] = wavelet->width;

        // Compute the width of each row of output pixels
        // (channel 0 is luma at full width; the chroma channels are half width)
        output_row_width[channel] = (channel == 0) ? luma_width : chroma_width;
    }

#if (0 && DEBUG)
    if (logfile) {
        fprintf(logfile, "Output buffer: %d (0x%p)\n", output, output);
    }
#endif

    // Earlier scheme (disabled): statically split rows between two threads, the
    // second thread walking the frame bottom-up with negated pitches.
    /* if (thread_index == 0)
    {
        row = 0;
        row_step = 1;
    }
    else if (thread_index == 1)
    {
        row = half_height - 1;
        row_step = -1;

        // Move to the bottom of the transform and process moving up
        for (channel = 0; channel < num_channels; channel++)
        {
            int offset = horizontal_pitch[channel] * (half_height - 1);

            horizontal_lowlow[channel] += offset;
            horizontal_lowhigh[channel] += offset;
            horizontal_highlow[channel] += offset;
            horizontal_highhigh[channel] += offset;

            horizontal_pitch[channel] = NEG(horizontal_pitch[channel]);
            //horizontal_pitch8s[channel] = NEG(horizontal_pitch8s[channel]);
        }

        //output += field_pitch * (half_height - 1);
        output += (frame_height - 1) * output_pitch/sizeof(PIXEL16U);
        output_pitch = NEG(output_pitch);
        field_pitch = NEG(field_pitch);
    }
    else
    {
        assert(0);  // middle threads
    }
    */
#if (0 && DEBUG)
    if (logfile) {
        fprintf(logfile, "Thread index: %d, start row: %d, row step: %d, field_pitch: %d\n",
                thread_index, row, row_step, field_pitch);
    }
#endif

    // Loop until all of the rows have been processed
    for (;;)
    {
        PIXEL16U *output_row_ptr;

        // Wait for one row from each channel to invert the transform.
        // Zero timeout: the semaphore is polled, so the loop exits as soon as
        // no more row tokens are available.
        return_value = WaitForSingleObject(row_semaphore, 0);

        // Determine the index of this worker thread
        if (return_value == WAIT_OBJECT_0)
        {
            // Atomically claim the next row index from the shared counter
            if(decoder->interlaced_worker.lock_init)
            {
                EnterCriticalSection(&decoder->interlaced_worker.lock);
            }
            row = decoder->interlaced_worker.current_row++;
            if(decoder->interlaced_worker.lock_init)
                LeaveCriticalSection(&decoder->interlaced_worker.lock);

            // NOTE(review): output_row_ptr is a PIXEL16U pointer but the pitch
            // is added without dividing by sizeof(PIXEL16U), unlike other code
            // paths in this file -- confirm output_pitch is in PIXEL16U units here.
            output_row_ptr = output;
            output_row_ptr += row * output_pitch;

            // Recompute the band row pointers for the claimed row
            for (channel = 0; channel < num_channels; channel++)
            {
                int pitch = horizontal_pitch[channel];
                IMAGE *wavelet = transform[channel]->wavelet[frame_index];

                horizontal_lowlow[channel] = wavelet->band[LL_BAND];
                horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
                horizontal_highlow[channel] = wavelet->band[HL_BAND];
                horizontal_highhigh[channel] = wavelet->band[HH_BAND];

                horizontal_lowlow[channel] += pitch*row;
                horizontal_lowhigh[channel] += pitch*row;
                horizontal_highlow[channel] += pitch*row;
                horizontal_highhigh[channel] += pitch*row;
            }
        }

        if (return_value == WAIT_OBJECT_0 && 0 <= row && row < half_height)
        {
            assert(0 <= row && row < half_height);

            if(decoder->frame.resolution == DECODED_RESOLUTION_FULL)
            {
                // Invert the horizontal transform applied to the temporal bands in each channel
                for (channel = 0; channel < num_channels; channel++)
                {
                    int pitch = horizontal_pitch[channel];

                    // Invert the horizontal transform applied to the temporal lowpass row
                    InvertHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
                                           temporal_lowpass, horizontal_width[channel]);

                    // Invert the horizontal transform applied to the temporal highpass row
                    InvertHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
                                           temporal_highpass, horizontal_width[channel]);

                    // Invert the temporal transform and output two rows of luma or chroma
                    InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
                                                   output_row_ptr, output_pitch, output_row_width[channel],
                                                   frame_width, chroma_offset, precision);

                    // Advance the output row pointer to the next channel
                    output_row_ptr += output_row_width[channel];
                }
            }
            else if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
            {
                // Half-horizontal decodes skip the horizontal inverse (bypass copies the bands)
                for (channel = 0; channel < num_channels; channel++)
                {
                    int pitch = horizontal_pitch[channel];

                    // Invert the horizontal transform applied to the temporal lowpass row
                    BypassHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
                                           temporal_lowpass, horizontal_width[channel]);

                    // Invert the horizontal transform applied to the temporal highpass row
                    BypassHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
                                           temporal_highpass, horizontal_width[channel]);

                    // Invert the temporal transform and output two rows of luma or chroma
                    InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
                                                   output_row_ptr, output_pitch, output_row_width[channel],
                                                   frame_width, chroma_offset, precision);

                    // Advance the output row pointer to the next channel
                    output_row_ptr += output_row_width[channel];
                }
            }
        }
        else
        {
            // No more rows to process
            break;
        }
    }

#if (1 && DEBUG)
    if (logfile) {
        fprintf(logfile, "Finished transform, thread index: %d\n", thread_index);
    }
#endif
}
#endif
#if 0
// Thread entry point that unpacks its argument record and decodes the top
// field of the frame into 16-bit rows.  Currently disabled by the enclosing
// preprocessor guard.
DWORD WINAPI TransformInverseFrameToRow16utopThread(LPVOID param)
{
    // Argument record; the field order and types must match the launcher's packing.
    struct data
    {
        TRANSFORM *transform[3];
        int frame_index;
        int num_channels;
        uint8_t *output;
        int output_pitch;
        FRAME_INFO *info;
        SCRATCH *scratch;
        int chroma_offset;
        int precision;
    };
    struct data *args = (struct data *)param;

    // Forward every field to the top-field transform routine
    TransformInverseFrameToRow16utop(args->transform, args->frame_index, args->num_channels,
                                     (PIXEL16U *)args->output, args->output_pitch, args->info,
                                     args->scratch, args->chroma_offset, args->precision);

    return 0;
}
// Thread entry point that unpacks its argument record and decodes the bottom
// field of the frame into 16-bit rows.  Currently disabled by the enclosing
// preprocessor guard.
DWORD WINAPI TransformInverseFrameToRow16ubottomThread(LPVOID param)
{
    // Argument record; the field order and types must match the launcher's packing.
    struct data
    {
        TRANSFORM *transform[3];
        int frame_index;
        int num_channels;
        uint8_t *output;
        int output_pitch;
        FRAME_INFO *info;
        SCRATCH *scratch;
        int chroma_offset;
        int precision;
    };
    struct data *args = (struct data *)param;

    // Forward every field to the bottom-field transform routine
    TransformInverseFrameToRow16ubottom(args->transform, args->frame_index, args->num_channels,
                                        (PIXEL16U *)args->output, args->output_pitch, args->info,
                                        args->scratch, args->chroma_offset, args->precision);

    return 0;
}
#endif
extern void fast_srand( int seed );
// Apply the inverse horizontal-temporal transform and pack the output into a buffer
#if 0
void TransformInverseFrameToBuffer(TRANSFORM *transform[], int frame_index, int num_channels,
                                   uint8_t *output, int output_pitch, FRAME_INFO *frame,
                                   char *buffer, size_t buffer_size, int chroma_offset,
                                   int precision)
#else
// Active signature: the scratch space is taken from a SCRATCH descriptor
// instead of being passed as a raw buffer pointer and size.
void TransformInverseFrameToBuffer(TRANSFORM *transform[], int frame_index, int num_channels,
                                   uint8_t *output, int output_pitch, FRAME_INFO *frame,
                                   const SCRATCH *scratch, int chroma_offset, int precision)
#endif
{
    // Pointers to the rows in the horizontal wavelet for each channel
    PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

    // Horizontal wavelet band width and pitch
    int horizontal_width[TRANSFORM_MAX_CHANNELS];
    int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
    //int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS];

    // Quantization factors
    int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
    int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
    int highlow_quantization[TRANSFORM_MAX_CHANNELS];
    int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

    // Push the scratch space state to allocate a new section
    char *buffer = scratch->free_ptr;
    size_t buffer_size = scratch->free_size;

    // Pointers to the rows in the temporal wavelet for each channel
    PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
    PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];

    // Dimensions of the reconstructed frame
    int frame_width = frame->width;
    int frame_height = frame->height;
    int half_height = frame_height / 2;
    size_t temporal_row_size = frame_width * sizeof(PIXEL);
    // NOTE(review): this byte count uses the unaligned row size; temporal_row_size
    // is rounded up to a cache line below, so yuv_buffer (buffer +
    // temporal_buffer_size) can overlap the last aligned temporal rows when
    // alignment padding was added -- verify against the scratch layout.
    size_t temporal_buffer_size = 2 * num_channels * temporal_row_size;
#if DEBUG
    size_t yuv_row_size = frame_width * 2;
#endif
    char *yuv_buffer;
    size_t yuv_buffer_size;

    int field_pitch = 2 * output_pitch;

    int format = frame->format;
    bool inverted = (format == DECODED_FORMAT_RGB24 || format == DECODED_FORMAT_RGB32);
    int output_width;
    int channel;
    int row;

    // Round up the temporal row size to an integral number of cache lines
    temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

    // Check that the buffer starts on a cache line boundary
    assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

    // Check that the number of channels is reasonable
    assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

    // Check that the buffer is large enough
    assert((2 * num_channels * temporal_row_size) <= buffer_size);

    // Allocate buffers for a single row of lowpass and highpass temporal coefficients
    // and initialize the arrays of row pointers into the horizontal transform bands
    for (channel = 0; channel < num_channels; channel++)
    {
        IMAGE *wavelet = transform[channel]->wavelet[frame_index];

        // Initialize the row pointers into the horizontal bands
        horizontal_lowlow[channel] = wavelet->band[LL_BAND];
        horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
        horizontal_highlow[channel] = wavelet->band[HL_BAND];
        horizontal_highhigh[channel] = wavelet->band[HH_BAND];

        lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
        lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
        highlow_quantization[channel] = wavelet->quantization[HL_BAND];
        highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

        // Compute the pitch in units of pixels
        horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

        // Compute the 8-bit pitch in units of pixels
        //horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL);
        //horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL8S);

        // Remember the width of the horizontal wavelet rows for this channel
        horizontal_width[channel] = wavelet->width;

        // Divide the buffer into temporal lowpass and highpass rows
        temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
        temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
    }

    // Allocate buffer space for the intermediate YUV data
    yuv_buffer = buffer + temporal_buffer_size;
    yuv_buffer_size = buffer_size - temporal_buffer_size;
#if DEBUG
    assert(yuv_buffer_size >= 2 * yuv_row_size);
#endif

    // RGB output formats are stored bottom-up: start at the last row and
    // negate the pitches so the loop walks upward through the buffer
    if (inverted)
    {
        output += (frame_height - 1) * output_pitch;
        output_pitch = (- output_pitch);
        field_pitch = (- field_pitch);
    }

    // Process one row at a time from each channel
    for (row = 0; row < half_height; row++)
    {
        // Scratch row for the inverse horizontal transform's intermediate results.
        // NOTE(review): this indexes past the 2 * num_channels temporal rows
        // checked by the assert above -- the scratch buffer must be larger than
        // that check implies.
        PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);

        // Invert the horizontal transform applied to the temporal bands in each channel
        for (channel = 0; channel < num_channels; channel++)
        {
            int pitch = horizontal_pitch[channel];
            //int pitch8s = horizontal_pitch8s[channel];

            // Invert the horizontal transform applied to the temporal lowpass row
            InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
                                                  (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
                                                  temporal_lowpass[channel],
                                                  horizontal_width[channel],
                                                  (PIXEL *)line_buffer);

            // Invert the horizontal transform applied to the temporal highpass row
            InvertHorizontalRow8sBuffered((PIXEL8S *)horizontal_highlow[channel], highlow_quantization[channel],
                                          (PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
                                          temporal_highpass[channel],
                                          horizontal_width[channel],
                                          (PIXEL *)line_buffer);

            // Advance to the next row in each horizontal band in this channel
            horizontal_lowlow[channel] += pitch;
            horizontal_lowhigh[channel] += pitch;
            horizontal_highlow[channel] += pitch;
            horizontal_highhigh[channel] += pitch;
        }

        // The output width is twice the width of the wavelet bands
        output_width = 2 * horizontal_width[0];

        // Adjust the frame width to fill to the end of each row
        //frame_width = output_pitch / 2;

//#if BUILD_PROSPECT
        if (format == DECODED_FORMAT_V210 || format == DECODED_FORMAT_YU64)
        {
            // Invert the temporal bands from all channels and pack as V210 output
            InvertInterlacedRow16sToV210(temporal_lowpass, temporal_highpass, num_channels,
                                         output, output_pitch, output_width, frame_width,
                                         yuv_buffer, yuv_buffer_size, format, chroma_offset, precision);
        }
        else
//#endif
        {
            // Invert the temporal bands from all channels and pack as 8-bit output
            InvertInterlacedRow16s(temporal_lowpass, temporal_highpass, num_channels,
                                   output, output_pitch, output_width, frame_width,
                                   yuv_buffer, yuv_buffer_size, format, frame->colorspace,
                                   chroma_offset, precision, row);
        }

        // Advance to the next row in the packed output image
        // (each iteration emits two interlaced output rows)
        output += field_pitch;
    }
}
// Convert a decoded image into the caller's buffer in the requested packed format.
//
// image          decoded image (internal representation)
// output_buffer  destination frame buffer
// output_pitch   destination row pitch
// format         DECODED_FORMAT_* selector for the packed output
void CopyImageToBuffer(IMAGE *image, uint8_t *output_buffer, int32_t output_pitch, int format)
{
    START(tk_convert);

    // Dispatch on the requested output format.  The non-inverted RGB formats
    // are written with the first image row displayed at the bottom, which the
    // conversion routines handle via their final 'inverted' argument.
    switch (format)
    {
    case DECODED_FORMAT_RGB24:
        ConvertImageToRGB(image, output_buffer, output_pitch, COLOR_FORMAT_RGB24, true);
        break;

    case DECODED_FORMAT_RGB24_INVERTED:
        ConvertImageToRGB(image, output_buffer, output_pitch, COLOR_FORMAT_RGB24, false);
        break;

    case DECODED_FORMAT_RGB32:
        ConvertImageToRGB(image, output_buffer, output_pitch, COLOR_FORMAT_RGB32, true);
        break;

    case DECODED_FORMAT_RGB32_INVERTED:
        ConvertImageToRGB(image, output_buffer, output_pitch, COLOR_FORMAT_RGB32, false);
        break;

    case DECODED_FORMAT_YUYV:
        ConvertImageToYUV(image, output_buffer, output_pitch, COLOR_FORMAT_YUYV, false);
        break;

    case DECODED_FORMAT_UYVY:
        ConvertImageToYUV(image, output_buffer, output_pitch, COLOR_FORMAT_UYVY, false);
        break;

    default:
        // Unsupported format: return a blank (chroma-zero) frame
        assert(0);
        memset(output_buffer, COLOR_CHROMA_ZERO, (size_t)(image->height * output_pitch));
        break;
    }

    STOP(tk_convert);
}
// Pack 10-bit lowpass planes into 8-bit YUYV while averaging horizontally
// adjacent samples (used for the side-by-side 3D output path).  Each pair of
// neighboring samples is summed and shifted right by 5: one bit for the
// average plus four bits to descale 10-bit data to 8 bits.
//
// NOTE(review): the second output byte comes from images[2] and the fourth
// from images[1] -- presumably the decoder stores chroma in V-before-U order;
// confirm the channel ordering against the callers.
void SideLowpass16s10bitToYUYV(IMAGE *images[], uint8_t *output_buffer, int output_width, int output_height,
                               int output_pitch, bool inverted)
{
    IMAGE *luma_plane = images[0];
    IMAGE *u_plane = images[1];
    IMAGE *v_plane = images[2];

    int width = luma_plane->width;
    int height = output_height;

    PIXEL *y_line = luma_plane->band[0];
    PIXEL *u_line = u_plane->band[0];
    PIXEL *v_line = v_plane->band[0];

    int y_stride = luma_plane->pitch/sizeof(PIXEL);
    int u_stride = u_plane->pitch/sizeof(PIXEL);
    int v_stride = v_plane->pitch/sizeof(PIXEL);

    uint8_t *row_ptr = output_buffer;
    int row, column;

    // The output pitch should be a positive number before inversion
    assert(output_pitch > 0);

    // For inverted output start at the bottom row and walk upward
    if (inverted) {
        row_ptr += (height - 1) * output_pitch;
        output_pitch = NEG(output_pitch);
    }

    for (row = 0; row < height; row++)
    {
        uint8_t *out = row_ptr;

        // Emit one YUYV group per four luma samples, averaging sample pairs
        for (column = 0; column < width; column += 4)
        {
            int chroma_column = column >> 1;

            *(out++) = SATURATE_8U((y_line[column] + y_line[column + 1]) >> 5);
            *(out++) = SATURATE_8U((v_line[chroma_column] + v_line[chroma_column + 1]) >> 5);
            *(out++) = SATURATE_8U((y_line[column + 2] + y_line[column + 3]) >> 5);
            *(out++) = SATURATE_8U((u_line[chroma_column] + u_line[chroma_column + 1]) >> 5);
        }

        // Step to the next input and output rows
        y_line += y_stride;
        u_line += u_stride;
        v_line += v_stride;
        row_ptr += output_pitch;
    }
}
// Convert 16-bit signed lowpass data into packed RGB/YUV and store it in the output buffer
void CopyLowpass16sToBuffer(DECODER *decoder, IMAGE *images[], int num_channels, uint8_t *output_buffer, int32_t output_pitch,
                            FRAME_INFO *info, int chroma_offset, int precision, int encode_format, int whitebitdepth)
{
    // images[]       lowpass band per channel (channel 0 first)
    // precision      encoded precision; 'descale' shifts the data down to 8 bits
    // encode_format  ENCODED_FORMAT_* of the stream; selects RGB/Bayer/YUV paths

    //IMAGE *image = frame->channel[0];
    bool inverted = false;
    int output_width = info->width;
    int output_height = info->height;
    int descale = precision - 8;

    // Get the color format from the decoded format
    int color_format = info->format & COLOR_FORMAT_MASK;

    // Must compile this routine with switches set for decoding to 8-bit unsigned pixels
#if !defined(_DECODE_FRAME_8U) || (_DECODE_FRAME_8U == 0)
    assert(0);
    return;
#endif

    START(tk_convert);

#if 0
    // Fill the output buffer with blank values
    EraseOutputBuffer(output_buffer, info->width, info->height, output_pitch, info->format);
#endif

    // Determine the type of conversion
    switch (info->format)
    {
    case DECODED_FORMAT_RGB24:
        inverted = true;
        // Fall through and convert to RGB (first image row displayed at the bottom)
    case DECODED_FORMAT_RGB24_INVERTED:
        if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
        {
            ConvertLowpass16sRGB48ToRGB(images, output_buffer, output_width, output_height, output_pitch,
                                        COLOR_FORMAT_RGB24, info->colorspace, inverted, descale, num_channels);
        }
        else
        {
            ConvertLowpass16sToRGBNoIPPFast(images, output_buffer, output_width, output_height, output_pitch,
                                            COLOR_FORMAT_RGB24, info->colorspace, inverted, descale);
        }
        break;

    case DECODED_FORMAT_RGB32:
        inverted = true;
        // Fall through and convert to RGB (first image row displayed at the bottom)
    case DECODED_FORMAT_RGB32_INVERTED:
        if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
        {
            ConvertLowpass16sRGB48ToRGB(images, output_buffer, output_width, output_height, output_pitch,
                                        COLOR_FORMAT_RGB32, info->colorspace, inverted, descale, num_channels);
        }
        else
        {
            ConvertLowpass16sToRGBNoIPPFast(images, output_buffer, output_width, output_height, output_pitch,
                                            COLOR_FORMAT_RGB32, info->colorspace, inverted, descale);
        }
        break;

    case DECODED_FORMAT_RG48:
        if(encode_format == ENCODED_FORMAT_BAYER)
        {
            ConvertLowpass16sBayerToRGB48(images, output_buffer, output_width, output_height,
                                          output_pitch, 2, num_channels);
        }
        else if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
        {
            int scale = 1;
            // NOTE(review): this condition repeats the enclosing else-if test, so
            // scale is always 2 on this path -- confirm whether a different
            // format test was intended here.
            if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
                scale = 2;

            ConvertLowpass16sRGB48ToRGB48(images, output_buffer, output_width, output_height,
                                          output_pitch, scale, num_channels);
        }
        else
        {
            ConvertLowpass16sYUVtoRGB48(images, (uint8_t *)output_buffer, output_width,
                                        output_height, output_pitch, info->colorspace, inverted, descale,
                                        info->format, whitebitdepth);
        }
        break;

    case DECODED_FORMAT_RG64:
        if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
        {
            ConvertLowpass16sRGBA64ToRGBA64(images, output_buffer, output_width, output_height, output_pitch,
                                            descale, num_channels, info->format & 0xffff);
        }
        else
        {
            // RG64 output is only supported for RGB(A)-encoded sources
            assert(0);
        }
        break;

    case DECODED_FORMAT_B64A:
    case DECODED_FORMAT_R210:
    case DECODED_FORMAT_DPX0:
    case DECODED_FORMAT_RG30:
    case DECODED_FORMAT_AR10:
    case DECODED_FORMAT_AB10:
        if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
        {
            ConvertLowpass16sRGBA64ToRGBA64(images, output_buffer, output_width, output_height, output_pitch,
                                            descale, num_channels, info->format & 0xffff);
        }
        else
        {
            ConvertLowpass16sYUVtoRGB48(images, (uint8_t *)output_buffer, output_width,
                                        output_height, output_pitch, info->colorspace, inverted, descale,
                                        info->format, whitebitdepth);
        }
        break;

#if 0
    case DECODED_FORMAT_YUYV_INVERTED:
        inverted = true;
        // Fall through and convert to YUV (first image row displayed at the bottom)
#endif
    case DECODED_FORMAT_YUYV:
    case DECODED_FORMAT_UYVY:
        if (precision == CODEC_PRECISION_10BIT)
        {
            int lineskip = 1; // 3D Work
            int pitch = output_pitch;
            // Stereo (3D) decodes may interleave or stack the two channels
            if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV)
            {
                if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC || decoder->channel_blend_type == BLEND_LINE_INTERLEAVED) // 3d Work
                {
                    lineskip = 2;
                    // NOTE(review): literal 3 -- presumably one of the BLEND_*
                    // constants tested just above; confirm and use the named value.
                    if(decoder->channel_blend_type == 3)
                        pitch *= 2;
                }
            }
            if((decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC || decoder->channel_blend_type == BLEND_FREEVIEW) && decoder->frame.format == DECODED_FORMAT_YUYV) //side by side
            {
                SideLowpass16s10bitToYUYV(images, output_buffer, output_width, output_height, pitch, inverted);
            }
            else
            {
                //ConvertLowpass16s10bitToYUV(images, output_buffer, output_width, output_height, pitch, COLOR_FORMAT_YUYV, inverted, lineskip);
                ConvertLowpass16s10bitToYUV(images, output_buffer, output_width, output_height, pitch, color_format, inverted, lineskip);
            }
        }
        else
        {
            //ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_YUYV, inverted);
            ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height, output_pitch, color_format, inverted);
        }
        break;

#if 0
    case DECODED_FORMAT_UYVY_INVERTED:
        inverted = true;
        // Fall through and convert to YUV (first image row displayed at the bottom)
#endif
#if 0
    case DECODED_FORMAT_UYVY:
        ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_UYVY, inverted);
        break;
#endif

//#if BUILD_PROSPECT
    case DECODED_FORMAT_V210:
        if (precision == CODEC_PRECISION_10BIT)
        {
            ConvertLowpass16s10bitToV210(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_V210, inverted);
        }
        else
        {
            //ConvertLowpass16sToV210(images, output_buffer, output_width, output_pitch, COLOR_FORMAT_V210, inverted);
            assert(0);
        }
        break;
//#endif

    case DECODED_FORMAT_YU64:
        // DAN04262004
        ConvertLowpass16sToYUV64(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_YU64, inverted, precision);
        break;

//#if BUILD_PROSPECT
    case DECODED_FORMAT_YR16:
        ConvertLowpass16sToYR16(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_YR16, inverted, precision);
        break;
//#endif

    default: // Unsupported format (output a blank frame)
        assert(0);
        break;
    }

    STOP(tk_convert);
}
// Convert a strip of planar YUV rows into the caller's buffer in the requested
// packed format.
//
// planar_output/planar_pitch  per-channel row pointers and pitches
// roi                         dimensions of the strip
// output_buffer/output_pitch  destination rows
// frame_width                 full width of the output frame rows
// format                      DECODED_FORMAT_* selector
// colorspace                  color conversion matrix selector
void ConvertYUVStripPlanarToBuffer(uint8_t *planar_output[], int planar_pitch[], ROI roi,
                                   uint8_t *output_buffer, int output_pitch, int frame_width,
                                   int format, int colorspace)
{
    int output_width = roi.width;

#if !defined(_DECODE_FRAME_8U) || (_DECODE_FRAME_8U == 0)
#error Must set compile-time switches to decode to 8-bit pixels
#endif

    START(tk_convert);

#if _ENCODE_CHROMA_OFFSET
#error Cannot handle images encoded with a non-zero chroma offset
#endif

    // Dispatch on the requested output format.  The non-inverted RGB formats
    // are displayed bottom-up, handled by the conversion routines' flag.
    switch(format)
    {
    case DECODED_FORMAT_RGB24:
        ConvertPlanarYUVToRGB(planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch,
                              COLOR_FORMAT_RGB24, colorspace, true);
        break;

    case DECODED_FORMAT_RGB24_INVERTED:
        ConvertPlanarYUVToRGB(planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch,
                              COLOR_FORMAT_RGB24, colorspace, false);
        break;

    case DECODED_FORMAT_RGB32:
        ConvertPlanarYUVToRGB(planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch,
                              COLOR_FORMAT_RGB32, colorspace, true);
        break;

    case DECODED_FORMAT_RGB32_INVERTED:
        ConvertPlanarYUVToRGB(planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch,
                              COLOR_FORMAT_RGB32, colorspace, false);
        break;

    case DECODED_FORMAT_YUYV:
        ConvertYUVStripPlanarToPacked(planar_output, planar_pitch, roi,
                                      output_buffer, output_pitch, frame_width, format);
        break;

    case DECODED_FORMAT_UYVY:
        ConvertPlanarYUVToUYVY(planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch,
                               COLOR_FORMAT_UYVY, colorspace, false);
        break;

    default:
        // Unsupported format (output a blank frame)
        assert(0);
        break;
    }

    STOP(tk_convert);
}
// Convert rows of 16-bit planar data into the caller's buffer in the requested
// format, using dithering conversion routines where available.
//
// planar_output/planar_pitch  per-channel 16-bit row pointers and pitches
// roi                         dimensions of the strip
// output_buffer/output_pitch  destination rows
// frame_width                 full width of the output frame rows
// format                      output format selector (see NOTE in the switch)
// colorspace                  color conversion matrix selector
void ConvertRow16uToDitheredBuffer(DECODER *decoder, uint8_t *planar_output[], int planar_pitch[], ROI roi,
                                   uint8_t *output_buffer, int output_pitch, int frame_width,
                                   int format, int colorspace)
{
    bool inverted = false;
    int output_width = roi.width;

    START(tk_convert);

    // Determine the type of conversion
    switch(format)
    {
    case DECODED_FORMAT_RGB24:
        inverted = true;
        // Fall through and convert to RGB (first image row displayed at the bottom)
    case DECODED_FORMAT_RGB24_INVERTED:
        //ConvertPlanarYUVToRGB
        ConvertRow16uToDitheredRGB(decoder, planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch,
                                   COLOR_FORMAT_RGB24, colorspace, inverted);
        break;

    case DECODED_FORMAT_RGB32:
        inverted = true;
        // Fall through and convert to RGB (first image row displayed at the bottom)
    case DECODED_FORMAT_RGB32_INVERTED:
        ConvertRow16uToDitheredRGB(decoder, planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch,
                                   COLOR_FORMAT_RGB32, colorspace, inverted);
        break;

    // NOTE(review): these labels use COLOR_FORMAT_* constants while the rest of
    // the switch uses DECODED_FORMAT_* -- presumably the numeric values
    // coincide; confirm against the format definitions.
    case COLOR_FORMAT_WP13:
    case COLOR_FORMAT_B64A:
    case COLOR_FORMAT_RG48:
    case COLOR_FORMAT_R210:
    case COLOR_FORMAT_DPX0:
    case COLOR_FORMAT_RG30:
    case COLOR_FORMAT_AR10:
    case COLOR_FORMAT_AB10:
        ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch, format, colorspace, NULL, NULL);
        break;

    case DECODED_FORMAT_YUYV:
        assert(0);// These routines are not yet updated for ROW16u inputs
        ConvertYUVStripPlanarToPacked(planar_output, planar_pitch, roi,
                                      output_buffer, output_pitch, frame_width, format);
        break;

    case DECODED_FORMAT_UYVY:
        assert(0);// These routines are not yet updated for ROW16u inputs
        ConvertPlanarYUVToUYVY(planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch,
                               COLOR_FORMAT_UYVY, colorspace, inverted);
        break;

    default: // Unsupported format (output a blank frame)
        assert(0);
        break;
    }

    STOP(tk_convert);
}
// Convert one row of packed YUYV pixels to the specified output format.
//
// input      packed YUYV source row
// output     destination row in the requested format
// length     number of pixels in the row
// format     DECODED_FORMAT_* selector for the output
// colorspace color conversion matrix selector (passed through to RGB conversion)
// precision  bits per component of the input; only 8-bit input is supported by
//            the packed YUV output paths (deeper input would need dithering)
//
// Fix: removed the local 'inverted' flag -- it was assigned on the RGB24/RGB32
// cases but never passed to any conversion routine, so the inverted and
// non-inverted format labels now share one case body with identical behavior.
void ConvertRowYUYV(uint8_t *input, uint8_t *output, int length, int format, int colorspace, int precision)
{
    // Packed YUYV is two bytes per pixel
    size_t row_size = 2 * length;

    START(tk_convert);

    // Determine the type of color conversion
    switch (format)
    {
    case DECODED_FORMAT_RGB24:
    case DECODED_FORMAT_RGB24_INVERTED:
        ConvertYUYVRowToRGB(input, output, length, COLOR_FORMAT_RGB24, colorspace, precision);
        break;

    case DECODED_FORMAT_RGB32:
    case DECODED_FORMAT_RGB32_INVERTED:
        ConvertYUYVRowToRGB(input, output, length, COLOR_FORMAT_RGB32, colorspace, precision);
        break;

    case DECODED_FORMAT_YUYV:
        if(precision == 8)
            memcpy(output, input, row_size);
        else
        {
            //need to dither to 8-bit
            assert(0);
        }
        break;

    case DECODED_FORMAT_UYVY:
        if(precision == 8)
            ConvertYUYVRowToUYVY(input, output, length, COLOR_FORMAT_UYVY);
        else
        {
            //need to dither to 8-bit
            assert(0);
        }
        break;

    //#if BUILD_PROSPECT
    case DECODED_FORMAT_V210:
        assert(0); // should not get here with 8-bit data
        //ConvertYUYVRowToV210(input, output, length, COLOR_FORMAT_V210);
        break;

    case DECODED_FORMAT_YU64:
        assert(0); // should not get here with 8-bit data
        //ConvertYUYVRowToYU64(input, output, length, COLOR_FORMAT_YU64);
        break;

    case DECODED_FORMAT_BYR3:
    case DECODED_FORMAT_BYR4:
        assert(0); // should not get here with 8-bit data
        break;
    //#endif

    default: // Unsupported format (output a blank row)
        assert(0);
        memset(output, 0, row_size);
        break;
    }

    STOP(tk_convert);
}
#if _THREADED_DECODER
// Get the wavelet at the given index in the transform, allocating or
// resizing it to the requested dimensions if necessary.
//
// Access to the shared wavelet array is serialized with the entropy worker
// lock (unless _DELAYED_THREAD_START is set) so that decoder worker threads
// do not race while the wavelet is reallocated.
//
// Returns the (possibly newly allocated) wavelet, which is also stored back
// into transform->wavelet[index].
IMAGE *GetWaveletThreadSafe(DECODER *decoder, TRANSFORM *transform, int index,
							int width, int height, int level, int type)
{
	// Initial read before the lock; re-read under the lock below
	IMAGE *wavelet = transform->wavelet[index];

	assert(decoder != NULL && transform != NULL);
	if (decoder != NULL && transform != NULL)
	{
#if (1 && DEBUG)
		FILE *logfile = decoder->logfile;
#endif
		// Lock access to the wavelet data
#if _DELAYED_THREAD_START==0
		Lock(&decoder->entropy_worker_new.lock);
#endif
		// Get the wavelet from the transform data structure (thread safe)
		wavelet = transform->wavelet[index];

		// Allocate (or reallocate) the wavelet
#if _ALLOCATOR
		wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
		wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
		// Save this wavelet in the transform data structure
		transform->wavelet[index] = wavelet;

		// Unlock access to the wavelet data
#if _DELAYED_THREAD_START==0
		Unlock(&decoder->entropy_worker_new.lock);
#endif
	}

	return wavelet;
}
// Update the codec state with the information in a tag value pair
//
// Processes one (tag, value) pair read from the bitstream.  Most tags simply
// record the value in the corresponding field of the codec state; some tags
// trigger further decoding from the bitstream (sample channel headers, the
// group index, subband coefficients) and some introduce optional chunks that
// are skipped by advancing the bitstream pointer.
//
// A negative tag marks an optional tag value pair; unknown required tags are
// an error, unknown optional chunks are skipped.
//
// Returns CODEC_ERROR_OKAY on success, otherwise a specific codec error code.
CODEC_ERROR UpdateCodecState(DECODER *decoder, BITSTREAM *input, CODEC_STATE *codec, TAGWORD tag, TAGWORD value)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	bool optional = false;
	int chunksize = 0;
	bool result;

	// Is this an optional tag? (negative tags mark optional pairs)
	if (tag < 0) {
		tag = NEG(tag);
		optional = true;
	}

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "UpdateCodecState tag: %d, value: %d, optional: %d\n",
				tag, value, optional);
	}
#endif

	switch (tag)
	{
	case CODEC_TAG_ZERO:				// Used internally
		assert(0);						// Should not occur in the bitstream
		error = CODEC_ERROR_INVALID_BITSTREAM;
		break;

	case CODEC_TAG_SAMPLE:				// Type of sample
		//assert(0);
		if (value == SAMPLE_TYPE_CHANNEL)
		{
			// Decode the rest of the channel header from the bitstream
			result = DecodeSampleChannelHeader(decoder, input);
			if (!result)
				error = CODEC_ERROR_DECODE_SAMPLE_CHANNEL_HEADER;
			else
				error = CODEC_ERROR_OKAY;
		}
		break;

	case CODEC_TAG_INDEX:				// Sample index table
		//assert(0);	// Need to figure out how to return the group index
		{
			// Read the per-channel size table from the bitstream
			int count = value;
			uint32_t *index = (uint32_t *)(&codec->channel_size[0]);
			DecodeGroupIndex(input, index, count);
			codec->num_channels = count;
		}
		break;

	case CODEC_TAG_SUBBAND:				// Has the decoder encountered a subband?
		{								// This tag is obsolete and not used in modern streams
			int subband = value;

			// Check that the subband number makes sense
			assert(0 <= subband && subband <= codec->max_subband);
			if (! (0 <= subband && subband <= codec->max_subband))
			{
				error = CODEC_ERROR_DECODING_SUBBAND;
				break;
			}

			// Decompress the subband
			result = DecodeSampleSubband(decoder, input, subband);
			if (!result)
				error = CODEC_ERROR_DECODING_SUBBAND;
			else
				error = CODEC_ERROR_OKAY;
		}
		break;

	case CODEC_TAG_BAND_HEADER: //CODEC_TAG_BAND_DIVISOR:	// Band divisor. this is last TAG before subband data so act.
		codec->band.divisor = value;	// This tag value pair encodes the band divisor which is obsolete
		{
			// This tag value pair marks the beginning of the encoded coefficients
			// The subband number has already been decoded
			int subband = codec->band.subband;

			result = DecodeSampleSubband(decoder, input, subband);
			if (!result)
				error = CODEC_ERROR_DECODING_SUBBAND;
			else
				error = CODEC_ERROR_OKAY;
		}
		break;

	case CODEC_TAG_ENTRY:				// Entry in sample index
		assert(0);	// Need to figure out how to return the group index
		break;

	case CODEC_TAG_MARKER:				// Bitstream marker
		{
			int marker = value;
			uint8_t *current_position;

			// Save the current bitstream position
			current_position = GetBitstreamPosition(input);
			current_position -= 4; // Step back to before the GetSegment i.e. the TAG

			if (IsLowPassHeaderMarker(marker))
			{
				// Save the bitstream position for the start of the channel
				codec->channel_position = current_position;
			}
			else if (IsLowPassBandMarker(marker))
			{
				// The lowpass band is always subband zero
				int subband = 0;
				result = DecodeSampleSubband(decoder, input, subband);
				if (!result)
					error = CODEC_ERROR_DECODING_SUBBAND;
				else
					error = CODEC_ERROR_OKAY;
			}
		}
		break;

	case CODEC_TAG_VERSION_MAJOR:		// Version
		assert(0);
		break;

	case CODEC_TAG_VERSION_MINOR:		// Minor version number
		assert(0);
		break;

	case CODEC_TAG_VERSION_REVISION:	// Revision number
		assert(0);
		break;

	case CODEC_TAG_VERSION_EDIT:		// Edit number
		assert(0);
		break;

	case CODEC_TAG_SEQUENCE_FLAGS:		// Video sequence flags
		assert(0);
		break;

	case CODEC_TAG_TRANSFORM_TYPE:		// Type of transform
		assert(TRANSFORM_TYPE_FIRST <= value && value <= TRANSFORM_TYPE_LAST);
		if (TRANSFORM_TYPE_FIRST <= value && value <= TRANSFORM_TYPE_LAST)
		{
			int i;
			codec->transform_type = value;

			// Refresh the prescale tables of all channel transforms for the new type
			for(i=0;i<TRANSFORM_MAX_CHANNELS;i++)
			{
				TRANSFORM *transform = decoder->transform[i];
				if(transform)
				{
					GetTransformPrescale(transform, codec->transform_type, codec->precision);
				}
			}
		}
		else
			error = CODEC_ERROR_TRANSFORM_TYPE;
		break;

	case CODEC_TAG_NUM_FRAMES:			// Number of frames in the group
		assert(0 <= value && value <= TRANSFORM_NUM_FRAMES);
		if (0 <= value && value <= TRANSFORM_NUM_FRAMES)
			codec->num_frames = value;
		else
			error = CODEC_ERROR_NUM_FRAMES;
		break;

	case CODEC_TAG_NUM_CHANNELS:		// Number of channels in the transform
		assert(value <= CODEC_MAX_CHANNELS);
		if (value <= CODEC_MAX_CHANNELS)
			codec->num_channels = value;
		else
			error = CODEC_ERROR_NUM_CHANNELS;
		break;

	case CODEC_TAG_NUM_WAVELETS:		// Number of wavelets in the transform
		assert(0 < value && value <= TRANSFORM_NUM_WAVELETS);
		if (0 < value && value <= TRANSFORM_NUM_WAVELETS)
			codec->num_wavelets = value;
		else
			error = CODEC_ERROR_NUM_WAVELETS;
		break;

	case CODEC_TAG_NUM_SUBBANDS:		// Number of encoded subbands
		assert(0 < value && value <= TRANSFORM_NUM_SUBBANDS);
		if (0 < value && value <= TRANSFORM_NUM_SUBBANDS)
			codec->num_subbands = value;
		else
			error = CODEC_ERROR_NUM_SUBBANDS;
		break;

	case CODEC_TAG_NUM_SPATIAL:			// Number of spatial levels
		assert(0 < value && value <= TRANSFORM_NUM_SPATIAL);
		if (0 < value && value <= TRANSFORM_NUM_SPATIAL)
			codec->num_spatial = value;
		else
			error = CODEC_ERROR_NUM_SPATIAL;
		break;

	case CODEC_TAG_FIRST_WAVELET:		// Type of the first wavelet
		assert(value == TRANSFORM_FIRST_WAVELET);
		if (value == TRANSFORM_FIRST_WAVELET)
			codec->first_wavelet = value;
		else
			error = CODEC_ERROR_FIRST_WAVELET;
		break;

	case CODEC_TAG_CHANNEL_SIZE:		// Number of bytes in each channel
		assert(0);
		break;

	case CODEC_TAG_GROUP_TRAILER:		// Group trailer and checksum
		codec->sample_done = true;
		break;

	case CODEC_TAG_FRAME_TYPE:			// Type of frame marks the frame start
		codec->frame.type = value;
		break;

	case CODEC_TAG_FRAME_WIDTH:			// Width of the frame
		codec->frame.width = value;
		break;

	case CODEC_TAG_FRAME_HEIGHT:		// Height of the frame
		codec->frame.height = value;

		//DAN20080729 -- Initialize the default colorspace based on clip resolution
		if ((decoder->frame.colorspace & COLORSPACE_MASK) == COLOR_SPACE_UNDEFINED)
		{
			int internalheight = value;
			int internalwidth = codec->frame.width;

			// Bayer frames are encoded at half dimensions
			if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
			{
				internalwidth *= 2;
				internalheight *= 2;
			}

			// Larger than SD defaults to 709, otherwise 601
			if(internalheight > 576 || internalwidth > 720)
				decoder->frame.colorspace |= COLOR_SPACE_CG_709;
			else
				decoder->frame.colorspace |= COLOR_SPACE_CG_601;
		}

		//if(decoder->frame.colorspace_filedefault)
		//	decoder->frame.colorspace = decoder->frame.colorspace_filedefault;

		// An application-specified colorspace always wins
		if(decoder->frame.colorspace_override)
			decoder->frame.colorspace = decoder->frame.colorspace_override;
		break;

	case CODEC_TAG_ENCODED_COLORSPACE: //DAN20080729
		if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
			value &= ~(COLOR_SPACE_BT_601|COLOR_SPACE_BT_709); // Bayer has no 601 vs 709,
				//there was a bug in 3.9.4 that had bayer flagged as 601.

		if(decoder->frame.colorspace_override)
			decoder->frame.colorspace = decoder->frame.colorspace_override;
		else
		{
			if(decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422)
			{
				decoder->frame.colorspace &= ~(COLOR_SPACE_BT_601 | COLOR_SPACE_BT_709);
				decoder->frame.colorspace |= (value & (COLOR_SPACE_BT_601 | COLOR_SPACE_BT_709));
				//Let the VSRGB status be controllable by the calling application (e.g. Vegas)
			}
			else
			{
				decoder->frame.colorspace &= ~(COLOR_SPACE_VS_RGB);
				decoder->frame.colorspace |= (value & (COLOR_SPACE_VS_RGB));
			}
		}

		// Remember the colorspace recorded in the file
		decoder->frame.colorspace_filedefault = value;
		break;

	case CODEC_TAG_FRAME_FORMAT:		// Format of the encoded pixels (GRAY, YUV, RGB, RGBA)
		assert(0);
		break;

	case CODEC_TAG_INPUT_FORMAT:		// Format of the original pixels
		codec->input_format = value;

		// Set the encoded format if it has not already been set
	//	error = UpdateEncodedFormat(codec, (COLOR_FORMAT)value);
		break;

	case CODEC_TAG_ENCODED_FORMAT:		// Internal format of the encoded data
	case CODEC_TAG_OLD_ENCODED_FORMAT:
		codec->encoded_format = value;
		// A three channel RGBA sample is really RGB 4:4:4
		if(codec->encoded_format == ENCODED_FORMAT_RGBA_4444 && codec->num_channels == 3)
			codec->encoded_format = ENCODED_FORMAT_RGB_444;
		break;

	case CODEC_TAG_FRAME_INDEX:			// Position of frame within the group
		codec->frame.group_index = value;
		break;

	case CODEC_TAG_FRAME_TRAILER:		// Frame trailer and checksum
		codec->sample_done = true;
		break;

	case CODEC_TAG_LOWPASS_SUBBAND:		// Subband number of the lowpass band
		codec->lowpass.subband = value;
		error = SetDefaultEncodedFormat(codec);
		break;

	case CODEC_TAG_NUM_LEVELS:			// Number of wavelet levels
		codec->lowpass.level = value;
		break;

	case CODEC_TAG_LOWPASS_WIDTH:		// Width of the lowpass band
		codec->lowpass.width = value;
		break;

	case CODEC_TAG_LOWPASS_HEIGHT:		// Height of the lowpass band
		codec->lowpass.height = value;
		break;

	case CODEC_TAG_MARGIN_TOP:			// Margins that define the encoded subset
		codec->lowpass.margin.top = value;
		break;

	case CODEC_TAG_MARGIN_BOTTOM:
		codec->lowpass.margin.bottom = value;
		break;

	case CODEC_TAG_MARGIN_LEFT:
		codec->lowpass.margin.left = value;
		break;

	case CODEC_TAG_MARGIN_RIGHT:
		codec->lowpass.margin.right = value;
		break;

	case CODEC_TAG_PIXEL_OFFSET:		// Quantization parameters
		codec->lowpass.pixel_offset = value;
		break;

	case CODEC_TAG_QUANTIZATION:		// Quantization divisor used during encoding
		codec->lowpass.quantization = value;
		break;

	case CODEC_TAG_PIXEL_DEPTH:			// Number of bits per pixel
		codec->lowpass.bits_per_pixel = value;
		break;

	case CODEC_TAG_LOWPASS_TRAILER:		// Lowpass trailer
		assert(0);
		break;

	case CODEC_TAG_WAVELET_TYPE:		// Type of wavelet
		codec->highpass.wavelet_type = value;
		break;

	case CODEC_TAG_WAVELET_NUMBER:		// Number of the wavelet in the transform
		codec->highpass.wavelet_number = value;
		break;

	case CODEC_TAG_WAVELET_LEVEL:		// Level of the wavelet in the transform
		codec->highpass.wavelet_level = value;
		break;

	case CODEC_TAG_NUM_BANDS:			// Number of wavelet bands
		codec->highpass.num_bands = value;
		break;

	case CODEC_TAG_HIGHPASS_WIDTH:		// Width of each highpass band
		codec->highpass.width = value;
		break;

	case CODEC_TAG_HIGHPASS_HEIGHT:		// Height of each highpass band
		codec->highpass.height = value;
		break;

	case CODEC_TAG_LOWPASS_BORDER:		// Dimensions of lowpass border (obsolete)
		codec->highpass.lowpass_border = value;
		break;

	case CODEC_TAG_HIGHPASS_BORDER:		// Dimensions of highpass border (obsolete)
		codec->highpass.highpass_border = value;
		break;

	case CODEC_TAG_LOWPASS_SCALE:		// Scale factor for lowpass band
		codec->highpass.lowpass_scale = value;
		break;

	case CODEC_TAG_LOWPASS_DIVISOR:		// Divisor for the lowpass band
		codec->highpass.lowpass_divisor = value;
		break;

	case CODEC_TAG_HIGHPASS_TRAILER:	// Highpass trailer
		assert(0);
		break;

	case CODEC_TAG_BAND_NUMBER:			// Identifying number of a wavelet band
		codec->band.number = value;
		break;

	case CODEC_TAG_BAND_WIDTH:			// Band data width
		codec->band.width = value;
		break;

	case CODEC_TAG_BAND_HEIGHT:			// Band data height
		codec->band.height = value;
		break;

	case CODEC_TAG_BAND_SUBBAND:		// Subband number of this wavelet band
		codec->band.subband = value;
		//assert(value != 255);
		break;

	case CODEC_TAG_BAND_ENCODING:		// Encoding method for this band
		codec->band.encoding = value;
		break;

	case CODEC_TAG_BAND_QUANTIZATION:	// Quantization applied to band
		codec->band.quantization = value;
		break;

	case CODEC_TAG_BAND_SCALE:			// Band scale factor
		codec->band.scale = value;
		break;

	case CODEC_TAG_BAND_TRAILER:		// Band trailer
		assert(0);
		break;

	case CODEC_TAG_NUM_ZEROVALUES:		// Number of zero values
		assert(0);
		break;

	case CODEC_TAG_NUM_ZEROTREES:		// Number of zerotrees
		assert(0);
		break;

	case CODEC_TAG_NUM_POSITIVES:		// Number of positive values
		assert(0);
		break;

	case CODEC_TAG_NUM_NEGATIVES:		// Number of negative values
		assert(0);
		break;

	case CODEC_TAG_NUM_ZERONODES:		// Number of zerotree nodes
		assert(0);
		break;

	case CODEC_TAG_CHANNEL:				// Channel number
		assert(0);
		break;

	case CODEC_TAG_INTERLACED_FLAGS:	// Interlaced structure of the video stream
		//assert(0);
		break;
		//assert(0);

	case CODEC_TAG_PROTECTION_FLAGS:	// Copy protection bits
		//assert(0);
		break;

	case CODEC_TAG_PICTURE_ASPECT_X:	// Numerator of the picture aspect ratio
		codec->picture_aspect_x = value;
		//assert(0);
		break;

	case CODEC_TAG_PICTURE_ASPECT_Y:	// Denominator of the picture aspect ratio
		codec->picture_aspect_y = value;
		//assert(0);
		break;

	case CODEC_TAG_SAMPLE_FLAGS:		// Flag bits that control sample decoding
		// Progressive versus interlaced decoding is specified by the sample flags
		error = UpdateCodecFlags(codec, value);
		break;

	case CODEC_TAG_FRAME_NUMBER:		// Sequence number of the frame in the bitstream
		codec->frame_number = value;
		break;

	// This TAG is now support as part of the universal decoder.
	// Only Prospect HD builds can decode 10bit.
	case CODEC_TAG_PRECISION:			// Number of bits in the video source
		codec->precision = value;
		{
			int i;
			// Refresh the prescale tables of all channel transforms for the new precision
			for(i=0;i<TRANSFORM_MAX_CHANNELS;i++)
			{
				TRANSFORM *transform = decoder->transform[i];
				if(transform)
				{
					GetTransformPrescale(transform, codec->transform_type, codec->precision);
				}
			}
		}
		break;

	case CODEC_TAG_PRESCALE_TABLE:
		{
			int i;
			int prescale[TRANSFORM_MAX_WAVELETS] = {0};

			// Unpack one 2-bit prescale shift per wavelet from the tag value
			for(i=0;i<TRANSFORM_MAX_WAVELETS;i++)
				prescale[i] = value >> (14-i*2) & 0x3;

			// Apply the table to every channel transform
			for(i=0;i<TRANSFORM_MAX_CHANNELS;i++)
			{
				TRANSFORM *transform = decoder->transform[i];
				if(transform)
				{
					memcpy(transform->prescale, prescale, sizeof(prescale));
				}
			}
		}
		break;

	case CODEC_TAG_VERSION:				// Version number of the encoder used in each GOP.
		// Packed as major (4 bits), minor (4 bits), revision (8 bits)
		codec->version[0] = (value>>12) & 0xf;
		codec->version[1] = (value>>8) & 0xf;
		codec->version[2] = value & 0xff;
		break;

	case CODEC_TAG_QUALITY_L:			// Low 16 bits of the encode quality
		codec->encode_quality &= 0xffff0000;
		codec->encode_quality |= value;
		break;

	case CODEC_TAG_QUALITY_H:			// High 16 bits of the encode quality
		codec->encode_quality &= 0xffff;
		codec->encode_quality |= value<<16;
		break;

	case CODEC_TAG_BAND_CODING_FLAGS:
		codec->active_codebook = value & 0xf; // 0-15 valid code books
		codec->difference_coding = (value>>4) & 1;
		break;

	// Peak table processing
	case CODEC_TAG_PEAK_TABLE_OFFSET_L:
		// Low 16 bits of the offset to the peak table
		codec->peak_table.offset &= ~0xffff;
		codec->peak_table.offset |= (value & 0xffff);
		codec->peak_table.base = (PIXEL *)(input->lpCurrentWord);
		codec->peak_table.level = 0; // reset for the next subband
		break;

	case CODEC_TAG_PEAK_TABLE_OFFSET_H:
		// High 16 bits of the offset to the peak table
		codec->peak_table.offset &= 0xffff;
		codec->peak_table.offset |= (value & 0xffff)<<16;
		codec->peak_table.level = 0; // reset for the next subband
		break;

	case CODEC_TAG_PEAK_LEVEL:
		codec->peak_table.level = value;
		codec->peak_table.base += codec->peak_table.offset / sizeof(PIXEL);
		break;

	case CODEC_TAG_PEAK_TABLE:
		//this is the chunk header, so we have peak data
		codec->peak_table.level = 0; // reset for the next subband

		//Just skip as the data was read ahead
		chunksize = value;
		chunksize &= 0xffff;
		input->lpCurrentWord += chunksize*4;
		input->nWordsUsed -= chunksize*4;
		break;

#if (1 && DEBUG)
	case CODEC_TAG_SAMPLE_END:			// Marks the end of the sample (for debugging only)
		assert(0);
		break;
#endif

	default:							// Unknown tag
		// Bit 0x4000 marks a sized chunk; bit 0x2000 selects a 24-bit size
		if(tag & 0x4000)
		{
			if(tag & 0x2000) // i.e. 0x6xxx = 24bit size.
			{
				chunksize = value;
				chunksize &= 0xffff;
				chunksize += ((tag&0xff)<<16);
			}
			else // 16bit size
			{
				chunksize = value;
				chunksize &= 0xffff;
			}
		}
		else if(tag & 0x2000) //24bit LONGs chunk size
		{
			optional = true; // Fixes a weird seneraio where the size fields in SizeTagPop() has not
							 // updated the size and turned the tag to optional.  TODO : WHY
			chunksize = 0; // do not skip

	//		chunksize = value + ((tag & 0xff)<<16);
			// do not skip an unknown but optional chunk
			// These are only use to size subbands, but the data within should not be skipped
			// unless
			if((tag & 0xff00) == CODEC_TAG_UNCOMPRESS)
			{
				// Remember the location and size of the uncompressed chunk
				optional = true;
				chunksize = value;
				chunksize &= 0xffff;
				chunksize += ((tag&0xff)<<16);

				decoder->uncompressed_chunk = (uint32_t *)input->lpCurrentWord;
				decoder->uncompressed_size = chunksize*4;
				decoder->sample_uncompressed = 1;
			}
		}

		assert(optional);
		if(!optional)
		{
			error = CODEC_ERROR_UNKNOWN_REQUIRED_TAG;
		}
		else if(chunksize > 0) // skip this option chunk
		{
			input->lpCurrentWord += chunksize*4;
			input->nWordsUsed -= chunksize*4;
		}
		break;
	}

	return error;
}
// Mark the specified band of the wavelet as decoded (valid).
//
// Sets both the valid and started flag bits for the band, holding the
// entropy worker lock while the flags are updated (in threaded builds with
// active worker threads) so that decode threads see a consistent flag word.
void UpdateWaveletBandValidFlags(DECODER *decoder, IMAGE *wavelet, int band)
{
	assert(decoder != NULL);
	assert(wavelet != NULL);
	if (decoder != NULL && wavelet != NULL)
	{
#if (1 && DEBUG)
		FILE *logfile = decoder->logfile;
#endif

#if _THREADED_DECODER
		// Lock access to the wavelet data
		if(decoder->entropy_worker_new.pool.thread_count)
			Lock(&decoder->entropy_worker_new.lock);
#endif

#if (0 && DEBUG)
		if (logfile) {
			fprintf(logfile, "Changing band valid flags: 0x%04X, mask: 0x%04X\n",
					wavelet->band_valid_flags, BAND_VALID_MASK(band));
		}
#endif
		// Update the wavelet band flags
		wavelet->band_valid_flags |= BAND_VALID_MASK(band);
		wavelet->band_started_flags |= BAND_VALID_MASK(band);

#if _THREADED_DECODER
		// Unlock access to the wavelet data
		if(decoder->entropy_worker_new.pool.thread_count)
			Unlock(&decoder->entropy_worker_new.lock);
#endif
	}
}
// Mark the specified band of the wavelet as started (decoding in progress).
//
// Only the started flag bit is set (compare UpdateWaveletBandValidFlags,
// which also sets the valid bit).  The update is performed under the entropy
// worker lock unless _DELAYED_THREAD_START is set.
void UpdateWaveletBandStartedFlags(DECODER *decoder, IMAGE *wavelet, int band)
{
	assert(decoder != NULL);
	assert(wavelet != NULL);
	if (decoder != NULL && wavelet != NULL)
	{
		// Update the wavelet band flags
#if _DELAYED_THREAD_START==0
		if(decoder->entropy_worker_new.pool.thread_count)
			Lock(&decoder->entropy_worker_new.lock);
#endif
		wavelet->band_started_flags |= BAND_VALID_MASK(band);
#if _DELAYED_THREAD_START==0
		if(decoder->entropy_worker_new.pool.thread_count)
			Unlock(&decoder->entropy_worker_new.lock);
#endif
	}
}
// Determine whether all of the wavelet bands that must be produced by the
// entropy decoder (as opposed to being computed by transforms earlier in the
// threaded transform queue) have been decoded.
//
// wavelet        - the wavelet to check (may be NULL if not yet created)
// index          - position of the wavelet within the transform
// transform_type - TRANSFORM_TYPE_FIELDPLUS or TRANSFORM_TYPE_SPATIAL
//
// Returns true when every required band has its valid flag set.
bool DecodedBandsValid(IMAGE *wavelet, int index, int transform_type)
{
	uint32_t transform_computed_mask;
	uint32_t required_mask;
	uint32_t present_mask;

	// The wavelet must exist before its bands can be checked
	if (wavelet == NULL) {
		// Too soon to wait for the wavelet bands to be decoded
		return false;
	}

	if (transform_type == TRANSFORM_TYPE_FIELDPLUS)
	{
		// The temporal wavelet is fully computed by earlier transforms
		if (index == 2)
		{
			assert(wavelet->wavelet_type == WAVELET_TYPE_TEMPORAL);
			assert(wavelet->num_bands == 2);
			return true;
		}

		// Wavelets at the end of a transform chain (indexes 3 and 5) need
		// every band decoded; otherwise the lowpass band is supplied by an
		// earlier transform in the queue
		transform_computed_mask = (index == 3 || index == 5) ? 0 : BAND_VALID_MASK(0);
	}
	else if (transform_type == TRANSFORM_TYPE_SPATIAL)
	{
		// The wavelet at the top of the pyramid needs every band decoded;
		// otherwise the lowpass band is supplied by an earlier transform
		transform_computed_mask = (index == 2) ? 0 : BAND_VALID_MASK(0);
	}
	else
	{
		// Unknown type of transform
		assert(0);

		// Assume that the bands are not valid
		return false;
	}

	// Every band except any computed by the transform thread must be decoded
	required_mask = ((1 << wavelet->num_bands) - 1) & ~transform_computed_mask;

	// Which of the required bands have been decoded so far?
	present_mask = (wavelet->band_valid_flags & required_mask);

	return (present_mask == required_mask);
}
// Queue an inverse transform request for the threaded transform worker.
//
// Copies the transform parameters (transform pointer, channel, wavelet
// index, precision) into the next free entry of the decoder's transform
// queue.  The queue is locked during the update unless _DELAYED_THREAD_START
// is set.  Note: the target wavelet may not exist yet when the transform is
// queued; the worker allocates it later.
void QueueThreadedTransform(DECODER *decoder, int channel, int index)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	TRANSFORM *transform = decoder->transform[channel];
	//IMAGE *wavelet = transform->wavelet[index];
	int precision = codec->precision;

	// The transform data structure must exist
	assert(transform != NULL);

	// The transform thread variables should have been created
	{
		int free_entry;

#if _DELAYED_THREAD_START==0
		// Lock access to the transform queue
		Lock(&decoder->entropy_worker_new.lock);
#endif

		// Copy the transform parameters into the next queue entry
		free_entry = decoder->transform_queue.free_entry;
		assert(0 <= free_entry && free_entry < DECODING_QUEUE_LENGTH);
		if (0 <= free_entry && free_entry < DECODING_QUEUE_LENGTH)
		{
			assert(transform != NULL);
			assert(0 <= channel && channel < TRANSFORM_MAX_CHANNELS);
			assert(0 <= index && index < TRANSFORM_MAX_WAVELETS);

			// Note: The wavelet may not exist when the transform is queued
			decoder->transform_queue.queue[free_entry].transform = transform;
			decoder->transform_queue.queue[free_entry].channel = channel;
			decoder->transform_queue.queue[free_entry].index = index;
			decoder->transform_queue.queue[free_entry].precision = precision;
			decoder->transform_queue.queue[free_entry].done = 0;

			// Update the transform request queue
			decoder->transform_queue.free_entry++;
			decoder->transform_queue.num_entries++;

#if (1 && DEBUG)
			if (logfile) {
				fprintf(logfile, "Queued transform, channel: %d, index: %d\n", channel, index);
			}
#endif
		}

#if _DELAYED_THREAD_START==0
		Unlock(&decoder->entropy_worker_new.lock);
#endif
	}
}
#if _THREADED_DECODER
// Wait for the entropy/transform worker pool to finish all queued work.
//
// In delayed-start builds the workers are started here before waiting.
// After all work has completed the transform queue counters are reset so the
// queue is ready for the next sample.
void WaitForTransformThread(DECODER *decoder)
{
	if(decoder->entropy_worker_new.pool.thread_count)
	{
#if _DELAYED_THREAD_START
		ThreadPoolSendMessage(&decoder->entropy_worker_new.pool, THREAD_MESSAGE_START);
#endif
		ThreadPoolWaitAllDone(&decoder->entropy_worker_new.pool);

		// Reset the transform queue for the next sample
		decoder->transform_queue.started = 0;
		decoder->transform_queue.num_entries = 0;
		decoder->transform_queue.next_entry = 0;
		decoder->transform_queue.free_entry = 0;
	}
}
#endif
#endif
#if _INTERLACED_WORKER_THREADS
// Perform the inverse frame transform to packed YUV using the interlaced
// worker threads.  Posts the transform parameters to the worker mailbox,
// primes the row semaphore with one count per transform row, wakes the
// worker threads, and blocks until all of them signal completion.
void TransformInverseFrameThreadedToYUV(DECODER *decoder, int frame_index, int num_channels,
										uint8_t *output, int pitch, FRAME_INFO *info,
										int chroma_offset, int precision)
{
	struct interlace_data *mailbox = &decoder->interlaced_worker.interlace_data;
	int32_t previous_count;
	int32_t thread;

	// There are half as many input rows as output rows
	int row_count = (((info->height + 7) / 8) * 8) / 2;

	// Post a message to the mailbox
	mailbox->type = THREAD_TRANSFORM_FRAME_YUV;
	mailbox->frame = frame_index;
	mailbox->num_channels = num_channels;
	mailbox->output = output;
	mailbox->pitch = pitch;
	memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
	mailbox->chroma_offset = chroma_offset;
	mailbox->precision = precision;

	// Set the semaphore to the number of rows
	decoder->interlaced_worker.current_row = 0;
	ReleaseSemaphore(decoder->interlaced_worker.row_semaphore, row_count, &previous_count);
	assert(previous_count == 0);

	// Wake up the worker threads
	for (thread = 0; thread < THREADS_IN_LAST_WAVELET; thread++) {
		SetEvent(decoder->interlaced_worker.start_event[thread]);
	}

	// Wait for all worker threads to finish
	WaitForMultipleObjects(THREADS_IN_LAST_WAVELET, decoder->interlaced_worker.done_event, true, INFINITE);
}
// Perform the inverse frame transform to 16-bit rows using the interlaced
// worker threads.  Identical in structure to the YUV variant: the parameters
// are posted to the worker mailbox, the row semaphore is primed, the workers
// are woken, and the call blocks until every worker signals completion.
void TransformInverseFrameThreadedToRow16u(DECODER *decoder, int frame_index, int num_channels,
										   PIXEL16U *output, int pitch, FRAME_INFO *info,
										   int chroma_offset, int precision)
{
	struct interlace_data *mailbox = &decoder->interlaced_worker.interlace_data;
	int32_t previous_count;
	int32_t thread;

	// There are half as many input rows as output rows
	int row_count = (((info->height + 7) / 8) * 8) / 2;

	// Post a message to the mailbox
	mailbox->type = THREAD_TRANSFORM_FRAME_ROW16U;
	mailbox->frame = frame_index;
	mailbox->num_channels = num_channels;
	mailbox->output = (uint8_t *)output;
	mailbox->pitch = pitch;
	memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
	mailbox->chroma_offset = chroma_offset;
	mailbox->precision = precision;

	// Set the semaphore to the number of rows
	decoder->interlaced_worker.current_row = 0;
	ReleaseSemaphore(decoder->interlaced_worker.row_semaphore, row_count, &previous_count);
	assert(previous_count == 0);

	// Wake up the worker threads
	for (thread = 0; thread < THREADS_IN_LAST_WAVELET; thread++) {
		SetEvent(decoder->interlaced_worker.start_event[thread]);
	}

	// Wait for all worker threads to finish
	WaitForMultipleObjects(THREADS_IN_LAST_WAVELET, decoder->interlaced_worker.done_event, true, INFINITE);
}
// Worker thread procedure for the interlaced inverse transform.
//
// Each worker claims a thread index, then loops waiting on its start event
// (or the shared stop event).  When started, it reads the transform
// parameters from the shared mailbox under the critical section, performs
// its section of the selected inverse transform, and signals its done event.
// The loop exits when the stop event is signaled or the wait is abandoned.
//
// Returns 0 on normal termination, 1 if the synchronization objects were
// not created.
DWORD WINAPI InterlacedWorkerThreadProc(LPVOID lpParam)
{
	DECODER *decoder = (DECODER *)lpParam;
	FILE *logfile = decoder->logfile;
	struct interlace_data *data = &decoder->interlaced_worker.interlace_data;
	int thread_index;
	HANDLE hObjects[2];
	DWORD dwReturnValue;

	// Restrict the thread to the requested processor affinity (if any)
	if(decoder->thread_cntrl.affinity)
	{
		HANDLE hCurrentThread = GetCurrentThread();
		SetThreadAffinityMask(hCurrentThread,decoder->thread_cntrl.affinity);
	}

	// Set the handler for system exceptions
#ifdef _WIN32
	SetDefaultExceptionHandler();
#endif

	// Determine the index of this worker thread
	if(decoder->interlaced_worker.lock_init)
	{
		EnterCriticalSection(&decoder->interlaced_worker.lock);
	}
	thread_index = decoder->interlaced_worker.thread_count++;
	if(decoder->interlaced_worker.lock_init)
		LeaveCriticalSection(&decoder->interlaced_worker.lock);

	// The transform worker variables should have been created
	assert(decoder->interlaced_worker.start_event[thread_index] != NULL);
	assert(decoder->interlaced_worker.row_semaphore != NULL);
	assert(decoder->interlaced_worker.done_event[thread_index] != NULL);
	assert(decoder->interlaced_worker.stop_event != NULL);
	if (!(decoder->interlaced_worker.start_event[thread_index] != NULL &&
		decoder->interlaced_worker.row_semaphore != NULL &&
		decoder->interlaced_worker.done_event[thread_index] != NULL &&
		decoder->interlaced_worker.stop_event != NULL)) {
		return 1;
	}

	// Wait on this thread's start event or the shared stop event
	hObjects[0] = decoder->interlaced_worker.start_event[thread_index];
	hObjects[1] = decoder->interlaced_worker.stop_event;

	for (;;)
	{
		// Wait for the signal to begin processing a transform
		dwReturnValue = WaitForMultipleObjects(2, hObjects, false, INFINITE);

		// Received a signal to begin inverse transform processing?
		if (dwReturnValue == WAIT_OBJECT_0)
		{
			int type;				// Type of inverse transform to perform
			int frame_index;		// Index of output frame to produce
			int num_channels;		// Number of channels in the transform array
			uint8_t *output;		// Output frame buffer
			int pitch;				// Output frame pitch
			FRAME_INFO info;		// Format of the output frame
			int chroma_offset;		// Offset for the output chroma
			int precision;			// Source pixel bit depth

			// Lock access to the transform data
			if(decoder->interlaced_worker.lock_init) {
				EnterCriticalSection(&decoder->interlaced_worker.lock);
			}

			// Get the processing parameters
			type = data->type;
			frame_index = data->frame;
			num_channels = data->num_channels;
			output = data->output;
			pitch = data->pitch;
			memcpy(&info, &data->info, sizeof(FRAME_INFO));
			chroma_offset = data->chroma_offset;
			precision = data->precision;

			// Unlock access to the transform data
			if(decoder->interlaced_worker.lock_init)
				LeaveCriticalSection(&decoder->interlaced_worker.lock);

			// Select the type of inverse transform to perform
			switch (type)
			{
			case THREAD_TRANSFORM_FRAME_YUV:
				//TODO: more to new _THREADED model
				TransformInverseFrameSectionToYUV(decoder, thread_index, frame_index, num_channels,
												  output, pitch, &info, chroma_offset, precision);
				break;

			case THREAD_TRANSFORM_FRAME_ROW16U:
				//TODO: more to new _THREADED model
				TransformInverseFrameSectionToRow16u(decoder, thread_index, frame_index, num_channels,
													 (PIXEL16U *)output, pitch, &info, chroma_offset, precision);
				break;

			default:
				assert(0);
				break;
			}

			// Signal that this thread is done
			SetEvent(decoder->interlaced_worker.done_event[thread_index]);
		}
		else
		{
			// Should have a condition that causes the thread to terminate
			assert(dwReturnValue == WAIT_OBJECT_0+1 || dwReturnValue == WAIT_ABANDONED);
			break;
		}
	}

	return 0;
}
#endif
// Compute the dimensions of the decoded frame at the specified resolution.
//
// The dimensions are derived from the wavelet that supplies the image at
// that resolution, scaled by two when the final inverse transform doubles
// the wavelet dimensions (full resolution and the debayer variants).
// Either output pointer may be NULL if that dimension is not needed.
void GetDecodedFrameDimensions(TRANSFORM **transform_array,
							   int num_channels,
							   int frame_index,
							   int resolution,
							   int *decoded_width_out,
							   int *decoded_height_out)
{
	IMAGE *wavelet = NULL;
	int decoded_scale = 0;
	int decoded_width;
	int decoded_height;

	// Select the wavelet and scale factor for the decoding resolution
	switch (resolution)
	{
	case DECODED_RESOLUTION_FULL_DEBAYER:
	case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
	case DECODED_RESOLUTION_FULL:
#if DEBUG
		assert(AllTransformBandsValid(transform_array, num_channels, frame_index));
#endif
		decoded_scale = 2;
		wavelet = transform_array[0]->wavelet[0];
		break;

	case DECODED_RESOLUTION_HALF_NODEBAYER:
	case DECODED_RESOLUTION_HALF:
#if DEBUG
		assert(AllLowpassBandsValid(transform_array, num_channels, frame_index));
#endif
		decoded_scale = 1;
		wavelet = transform_array[0]->wavelet[0];
		break;

	case DECODED_RESOLUTION_QUARTER:
		decoded_scale = 1;
		wavelet = transform_array[0]->wavelet[3];
		break;

	case DECODED_RESOLUTION_LOWPASS_ONLY:
		decoded_scale = 1;
		wavelet = transform_array[0]->wavelet[5];

		// Intra frame transforms store the lowpass wavelet at index 2
		if (wavelet == NULL) {
			wavelet = transform_array[0]->wavelet[2];
		}
		break;

	default:
		assert(0);
		break;
	}

	// Compute the decoded frame dimensions
	assert(wavelet != NULL);
	decoded_width = decoded_scale * wavelet->width;
	decoded_height = decoded_scale * wavelet->height;

	if (decoded_width_out) {
		*decoded_width_out = decoded_width;
	}

	if (decoded_height_out) {
		*decoded_height_out = decoded_height;
	}
}
// Reconstruct Bayer format to the requested output format
/*
	Convert an uncompressed Bayer sample, previously stashed in
	decoder->uncompressed_chunk / decoder->uncompressed_size, into the
	format requested by info->format, writing into output_buffer with
	output_pitch bytes per row.

	BYR2/BYR3/BYR4 outputs are repacked directly and return immediately;
	every other supported format is unpacked into the RawBayer16 scratch
	buffer and handed to the worker-thread pool (JOB_TYPE_OUTPUT) for
	demosaic/output.  The uncompressed chunk pointer/size are cleared once
	the data has been consumed.

	Returns CODEC_ERROR_OKAY on success, CODEC_ERROR_UNSUPPORTED_FORMAT
	for unknown formats (or non-threaded builds), or
	CODEC_ERROR_MEMORY_ALLOC if a scratch buffer cannot be allocated.
*/
CODEC_ERROR UncompressedSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	int format = info->format;
	int width = info->width;
	int height = info->height;

	// Assume the format is unsupported until it is recognized below
	error = CODEC_ERROR_UNSUPPORTED_FORMAT;

	switch (format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB32:
	case DECODED_FORMAT_RG48:		//DAN20090120 added not sure why they weren't here.
	case DECODED_FORMAT_RG64:		//DAN20101207 added not sure why they weren't here.
	case DECODED_FORMAT_WP13:		//DAN20090120 ""
	case DECODED_FORMAT_W13A:		//DAN20101207 ""
	case DECODED_FORMAT_B64A:
	case DECODED_FORMAT_R210:
	case DECODED_FORMAT_DPX0:
	case DECODED_FORMAT_RG30:
	case DECODED_FORMAT_AR10:
	case DECODED_FORMAT_AB10:
	case DECODED_FORMAT_YR16:
	case DECODED_FORMAT_V210:
	case DECODED_FORMAT_YU64:
	case DECODED_FORMAT_YUYV:		//?
	case DECODED_FORMAT_UYVY:		//?
	case DECODED_FORMAT_R408:
	case DECODED_FORMAT_V408:
		// These formats go through the demosaic path below
		error = CODEC_ERROR_OKAY;
		break;

	case DECODED_FORMAT_BYR2:
	case DECODED_FORMAT_BYR4:
		{
			unsigned short *curve = NULL;

			// Apply the linear-restore curve only for BYR4 output when no
			// encode curve preset overrides it
			if(decoder->BYR4LinearRestore && decoder->frame.format == DECODED_FORMAT_BYR4 && decoder->cfhddata.encode_curve_preset == 0)
			{
				curve = decoder->BYR4LinearRestore;
			}

			ConvertPackedToBYR2(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, output_buffer, output_pitch, curve);
		}
		// The uncompressed chunk has been consumed
		decoder->uncompressed_chunk = 0;
		decoder->uncompressed_size = 0;
		return CODEC_ERROR_OKAY;

	case DECODED_FORMAT_BYR3:
		ConvertPackedToBYR3(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, output_buffer, output_pitch);
		decoder->uncompressed_chunk = 0;
		decoder->uncompressed_size = 0;
		return CODEC_ERROR_OKAY;
	}

	if(error)
		return error;

	// Need to allocate a scratch buffer for decoding the Bayer frame?
	if (decoder->RawBayer16 == NULL)
	{
		// Four Bayer data samples at each 2x2 quad in the grid
		int pixel_size = 4 * sizeof(PIXEL16U);
		int frame_size;
		const size_t alignment = 16;
#if _ALLOCATOR
		ALLOCATOR *allocator = decoder->allocator;
#endif
		frame_size = width * height * pixel_size;

#if _ALLOCATOR
		decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment);
#else
		decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment);
#endif
		assert(decoder->RawBayer16 != NULL);
		if (! (decoder->RawBayer16 != NULL)) {
			return CODEC_ERROR_MEMORY_ALLOC;
		}
		decoder->RawBayerSize = frame_size;

		if(decoder->RGBFilterBuffer16 == NULL)
		{
			// Three planes of filtered RGB; four when alpha must be carried
			int size = frame_size*3;
			if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
				size = frame_size*4;

#if _ALLOCATOR
			decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16);
#else
			decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16);
#endif
			assert(decoder->RGBFilterBuffer16 != NULL);
			if (! (decoder->RGBFilterBuffer16 != NULL)) {
				return CODEC_ERROR_MEMORY_ALLOC;
			}
			// Record the actual allocation size (fixed: was frame_size*3,
			// which understated the buffer when the alpha plane was included)
			decoder->RGBFilterBufferSize = size;
		}
	}

	// Using the RGBFilterBuffer16 as scratch space
	ConvertPackedToRawBayer16(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, decoder->RawBayer16, decoder->RGBFilterBuffer16, info->resolution);
	decoder->uncompressed_chunk = 0;
	decoder->uncompressed_size = 0;

#if _THREADED
	//DemosaicRAW
	{
		WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
		int inverted = false;
		uint8_t *output = output_buffer;
		int pitch = output_pitch;

#if _DELAY_THREAD_START
		if(decoder->worker_thread.pool.thread_count == 0)
		{
			CreateLock(&decoder->worker_thread.lock);
			// Initialize the pool of transform worker threads
			ThreadPoolCreate(&decoder->worker_thread.pool,
							 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
							 WorkerThreadProc,
							 decoder);
		}
#endif
		// RGB24/RGB32 rows are stored bottom-up, so switch to the inverted
		// format and walk the output buffer upwards
		if (format == DECODED_FORMAT_RGB24)
		{
			format = DECODED_FORMAT_RGB24_INVERTED;
			inverted = true;
		}
		else if (format == DECODED_FORMAT_RGB32)
		{
			format = DECODED_FORMAT_RGB32_INVERTED;
			inverted = true;
		}

		// Have the output location and pitch been inverted?
		if (inverted && pitch > 0) {
			int height = info->height;
			if(info->resolution == DECODED_RESOLUTION_FULL_DEBAYER || info->resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
				height *= 2;
			output += (height - 1) * pitch;		// Start at the bottom row
			pitch = NEG(pitch);					// Negate the pitch to go up
		}

		// Post a message to the mailbox
		mailbox->output = output;
		mailbox->pitch = pitch;
		memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
		mailbox->jobType = JOB_TYPE_OUTPUT;

		// Set the work count to the number of rows to process
		ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);

		// Start the transform worker threads
		ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

		// Wait for all of the worker threads to finish
		ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
	}
#else
	error = CODEC_ERROR_UNSUPPORTED_FORMAT;
#endif

	return error;
}
// Reconstruct uncompressed v210 YUV format to the requested output format
/*
	Convert an uncompressed v210 YUV sample (decoder->uncompressed_chunk /
	decoder->uncompressed_size) into the requested output format.

	Fast paths (full resolution, no active-metadata decoding):
	  - V210 output: row copy (straight memcpy when strides match);
	  - YUYV/UYVY output: unpack each v210 row to 8-bit 4:2:2 in place.
	Otherwise the frame is expanded through a RawBayer16 scratch buffer
	and, in threaded builds, handed to the worker pool as
	JOB_TYPE_OUTPUT_UNCOMPRESSED.

	Returns CODEC_ERROR_OKAY on success or CODEC_ERROR_MEMORY_ALLOC when
	the scratch buffer cannot be allocated.
*/
CODEC_ERROR UncompressedSampleFrameYUVToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	int format = info->format;
	int width = info->width;
	int height = info->height;
	int resolution = info->resolution;

	error = CODEC_ERROR_UNSUPPORTED_FORMAT;

	// Fast path: v210 in, v210 out at full resolution
	if(format == DECODED_FORMAT_V210 && resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false)
	{
		int smallest_Stride = output_pitch;
		int unc_Stride = decoder->uncompressed_size / height;
		if(unc_Stride < smallest_Stride)
			smallest_Stride = unc_Stride;

		if(unc_Stride == output_pitch)
			memcpy(output_buffer, decoder->uncompressed_chunk, decoder->uncompressed_size);
		else
		{
			// Strides differ: copy row by row, clipping to the smaller stride
			int y;
			uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
			uint8_t *dst = (uint8_t *)output_buffer;
			for(y=0; y<height; y++)
			{
				memcpy(dst, src, smallest_Stride);
				src += unc_Stride;
				dst += output_pitch;
			}
		}
		decoder->uncompressed_chunk = 0;
		decoder->uncompressed_size = 0;
		return CODEC_ERROR_OKAY;
	}

	// Fast path: unpack v210 rows directly to 8-bit YUYV/UYVY
	if((format == DECODED_FORMAT_YUYV || format == DECODED_FORMAT_UYVY) && resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false)
	{
		int smallest_Stride = output_pitch;
		int unc_Stride = decoder->uncompressed_size / height;
		if(unc_Stride < smallest_Stride)
			smallest_Stride = unc_Stride;

		{
			int y;
			uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
			uint8_t *dst = (uint8_t *)output_buffer;
			for(y=0; y<height; y++)
			{
				uint32_t *input_ptr = (uint32_t *)src;
				int pos = 0;
				int column=0,length = width;
				length -= length % 6;	//DAN03252004 -- fix a memory overflow.

				// Each group of four v210 words carries six pixels
				for (column=0; column < length; column += 6)
				{
					uint32_t yuv;
					int y;
					int u;
					int v;

					// Read the first word
					yuv = *(input_ptr++);
					u = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
					y = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
					v = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;

					// Expand the pixels to sixteen bits
					u <<= 6;
					y <<= 6;
					v <<= 6;

					dst[pos++] = SATURATE_16U(y)>>8;
					dst[pos++] = SATURATE_16U(u)>>8;

					// Read the second word
					yuv = *(input_ptr++);
					y = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
					y <<= 6;
					dst[pos++] = SATURATE_16U(y)>>8;
					dst[pos++] = SATURATE_16U(v)>>8;

					u = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
					y = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;
					u <<= 6;
					y <<= 6;
					dst[pos++] = SATURATE_16U(y)>>8;
					dst[pos++] = SATURATE_16U(u)>>8;

					// Read the third word
					yuv = *(input_ptr++);
					v = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
					y = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
					v <<= 6;
					y <<= 6;
					dst[pos++] = SATURATE_16U(y)>>8;
					dst[pos++] = SATURATE_16U(v)>>8;

					u = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;
					u <<= 6;

					// Read the fourth word
					yuv = *(input_ptr++);
					y = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
					y <<= 6;
					dst[pos++] = SATURATE_16U(y)>>8;
					dst[pos++] = SATURATE_16U(u)>>8;

					v = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
					y = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;
					v <<= 6;
					y <<= 6;
					dst[pos++] = SATURATE_16U(y)>>8;
					dst[pos++] = SATURATE_16U(v)>>8;
				}

				if(format == DECODED_FORMAT_UYVY)
				{
					// Swap luma/chroma byte order in place for UYVY
					for (column=0; column < pos; column += 2)
					{
						int t = dst[column];
						dst[column] = dst[column+1];
						dst[column+1] = t;
					}
				}

				src += unc_Stride;
				dst += output_pitch;
			}
		}
		decoder->uncompressed_chunk = 0;
		decoder->uncompressed_size = 0;
		return CODEC_ERROR_OKAY;
	}

	{
		// Expand YUV at the target resolution, and use the ActiveMetadata engine.
		// Need to allocate a scratch buffer for decoding the frame?
		if (decoder->RawBayer16 == NULL || decoder->RawBayerSize < width * 64)	//RawBayer used as a scratch buffer
		{
			const size_t alignment = 16;
#if _ALLOCATOR
			ALLOCATOR *allocator = decoder->allocator;
#endif
			// Scratch rows are sized from the original (pre-scaling) width
			int orig_width = width;
			if(resolution == DECODED_RESOLUTION_HALF)
				orig_width *= 2;
			if(resolution == DECODED_RESOLUTION_QUARTER)
				orig_width *= 4;

			// Release any undersized buffer before reallocating
			if(decoder->RawBayer16)
			{
#if _ALLOCATOR
				FreeAligned(allocator, decoder->RawBayer16);
				decoder->RawBayer16 = NULL;
				decoder->RawBayerSize = 0;
#else
				MEMORY_ALIGNED_FREE(decoder->RawBayer16);
				decoder->RawBayer16 = NULL;
				decoder->RawBayerSize = 0;
#endif
			}

#if _ALLOCATOR
			decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, orig_width * 64, alignment);
#else
			decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(orig_width * 64, alignment);
#endif
			assert(decoder->RawBayer16 != NULL);
			if (! (decoder->RawBayer16 != NULL)) {
				return CODEC_ERROR_MEMORY_ALLOC;
			}
			decoder->RawBayerSize = orig_width * 64;
		}
	}

	// unpack source original YUV into YU64?
	if(decoder->RawBayer16)
	{
#if _THREADED
		{
			WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
			if(decoder->worker_thread.pool.thread_count == 0)
			{
				CreateLock(&decoder->worker_thread.lock);
				// Initialize the pool of transform worker threads
				ThreadPoolCreate(&decoder->worker_thread.pool,
								 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
								 WorkerThreadProc,
								 decoder);
			}
#endif
			// Post a message to the mailbox
			mailbox->output = output_buffer;
			mailbox->pitch = output_pitch;
			memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
			mailbox->jobType = JOB_TYPE_OUTPUT_UNCOMPRESSED;

			// Set the work count to the number of rows to process
			ThreadPoolSetWorkCount(&decoder->worker_thread.pool, height);

			// Start the transform worker threads
			ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

			// Wait for all of the worker threads to finish
			ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
		}
#else
		{
			int orig_width = width;
			int orig_height = height;
			int row,lines = 1;
			int start,end;
			// Fixed: src/dst were used without declarations in this
			// non-threaded path, which did not compile.
			uint8_t *src = NULL;
			uint8_t *dst = (uint8_t *)output_buffer;

			if(resolution == DECODED_RESOLUTION_HALF)
			{
				orig_width *= 2;
				orig_height *= 2;
				lines = 2;
			}
			if(resolution == DECODED_RESOLUTION_QUARTER)
			{
				orig_width *= 4;
				orig_height *= 4;
				lines = 4;
			}

			start = 0;
			end = height;
			if(format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24)
			{
				// RGB output is bottom-up: walk the source rows in reverse
				start = height-1;
				end = -1;
			}

			for (row = start; row != end; end > start ? row++ : row--)
			{
				int whitebitdepth = 16;
				int flags = 0;
				uint8_t *planar_output[3];
				int planar_pitch[3];
				ROI roi;
				PIXEL16U *y_row_ptr;
				PIXEL16U *u_row_ptr;
				PIXEL16U *v_row_ptr;
				PIXEL16U *scanline = (PIXEL16U *)decoder->RawBayer16;
				PIXEL16U *scanline2 = scanline + orig_width * 8;
				unsigned short *sptr;
				int i,unc_Stride = decoder->uncompressed_size / orig_height;

				y_row_ptr = (PIXEL16U *)scanline;
				u_row_ptr = y_row_ptr + orig_width;
				v_row_ptr = u_row_ptr + orig_width/2;

				for(i=0; i<lines; i++)
				{
					// NOTE(review): every pass re-reads the same source row
					// (row * unc_Stride); presumably the intent was
					// (row*lines + i) * unc_Stride -- confirm before
					// enabling this non-threaded path.
					src = (uint8_t *)decoder->uncompressed_chunk;
					src += row * unc_Stride;

					// Repack the row of 10-bit pixels into 16-bit pixels
					ConvertV210RowToYUV16((uint8_t *)src, y_row_ptr, u_row_ptr, v_row_ptr, orig_width, scanline2);

					// Advance to the next rows in the input and output images
					y_row_ptr += orig_width*2;
					u_row_ptr = y_row_ptr + orig_width;
					v_row_ptr = u_row_ptr + orig_width/2;
				}

				y_row_ptr = (PIXEL16U *)scanline;
				u_row_ptr = y_row_ptr + width;
				v_row_ptr = u_row_ptr + width/2;

				// Box-average the luma rows down to the target width
				if(lines == 2)
				{
					for(i=0; i<width*2;i++)
						y_row_ptr[i] = (y_row_ptr[i*2] + y_row_ptr[i*2+1] + y_row_ptr[orig_width*2+i*2] + y_row_ptr[orig_width*2+i*2+1]) >> 2;
				}
				else if(lines == 4)
				{
					for(i=0; i<width*2;i++)
						y_row_ptr[i] = (y_row_ptr[i*4] + y_row_ptr[i*4+2] + y_row_ptr[orig_width*2*2+i*4] + y_row_ptr[orig_width*2*2+i*4+2]) >> 2;
				}

				roi.width = width;
				roi.height = 1;
				planar_output[0] = (uint8_t *)y_row_ptr;
				planar_output[1] = (uint8_t *)v_row_ptr;
				planar_output[2] = (uint8_t *)u_row_ptr;
				planar_pitch[0] = 0;
				planar_pitch[1] = 0;
				planar_pitch[2] = 0;

				if(decoder->apply_color_active_metadata)
				{
					ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi,
											 (unsigned char *)scanline2, width, output_pitch,
											 COLOR_FORMAT_RGB_8PIXEL_PLANAR, decoder->frame.colorspace, &whitebitdepth, &flags);

					sptr = scanline2;
					sptr = ApplyActiveMetaData(decoder, width, 1, row, scanline2, scanline,
											   info->format, &whitebitdepth, &flags);
				}
				else
				{
					ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi,
											 (unsigned char *)scanline2, width, output_pitch,
											 COLOR_FORMAT_WP13, decoder->frame.colorspace, &whitebitdepth, &flags);
					sptr = scanline2;
				}

				ConvertLinesToOutput(decoder, width, 1, row, sptr,
									 dst, output_pitch, format, whitebitdepth, flags);

				dst += output_pitch;
			}
		}
#endif
	}

	error = CODEC_ERROR_OKAY;
	return error;
}
// Reconstruct uncompressed DPX0 RGB format to the requested output format
/*
	Convert an uncompressed DPX0-style 10-bit RGB sample
	(decoder->uncompressed_chunk / decoder->uncompressed_size) into the
	requested output format.

	Fast path (full resolution, no active-metadata decoding): convert the
	payload in place when a different 10-bit packing was requested, then
	copy rows to the output buffer.  Otherwise the frame is expanded
	through the RawBayer16 scratch buffer and, in threaded builds, handed
	to the worker pool as JOB_TYPE_OUTPUT_UNCOMPRESSED.

	Returns CODEC_ERROR_OKAY on success or CODEC_ERROR_MEMORY_ALLOC when
	the scratch buffer cannot be allocated.
*/
CODEC_ERROR UncompressedSampleFrameRGBToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	int format = info->format;
	int width = info->width;
	int height = info->height;
	int resolution = info->resolution;

	error = CODEC_ERROR_UNSUPPORTED_FORMAT;

	if( (format == DECODED_FORMAT_DPX0 || format == DECODED_FORMAT_AR10 || format == DECODED_FORMAT_AB10 || format == DECODED_FORMAT_RG30 || format == DECODED_FORMAT_R210) &&
		resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false)
	{
		int smallest_Stride = output_pitch;
		int unc_Stride = decoder->uncompressed_size / height;
		if(unc_Stride < smallest_Stride)
			smallest_Stride = unc_Stride;

		// Repack the DPX0 payload in place when a different 10-bit RGB
		// packing was requested
		if(format != DECODED_FORMAT_DPX0)
		{
			int unc_Stride = decoder->uncompressed_size / height;
			ConvertDPX0ToRGB10((uint8_t *)decoder->uncompressed_chunk, unc_Stride, width, height, format);
		}

		if(unc_Stride == output_pitch)
			memcpy(output_buffer, decoder->uncompressed_chunk, decoder->uncompressed_size);
		else
		{
			// Strides differ: copy row by row, clipping to the smaller stride
			int y;
			uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
			uint8_t *dst = (uint8_t *)output_buffer;
			for(y=0; y<height; y++)
			{
				memcpy(dst, src, smallest_Stride);
				src += unc_Stride;
				dst += output_pitch;
			}
		}
		decoder->uncompressed_chunk = 0;
		decoder->uncompressed_size = 0;
		return CODEC_ERROR_OKAY;
	}

	{
		// Expand RGB at the target resolution, and use the ActiveMetadata engine.
		// Need to allocate a scratch buffer for decoding the frame?
		if (decoder->RawBayer16 == NULL || decoder->RawBayerSize < width * 64)	//RawBayer used as a scratch buffer
		{
			const size_t alignment = 16;
#if _ALLOCATOR
			ALLOCATOR *allocator = decoder->allocator;
#endif
			// Scratch rows are sized from the original (pre-scaling) width
			int orig_width = width;
			if(resolution == DECODED_RESOLUTION_HALF)
				orig_width *= 2;
			if(resolution == DECODED_RESOLUTION_QUARTER)
				orig_width *= 4;

			// Release any undersized buffer before reallocating
			if(decoder->RawBayer16)
			{
#if _ALLOCATOR
				FreeAligned(allocator, decoder->RawBayer16);
				decoder->RawBayer16 = NULL;
				decoder->RawBayerSize = 0;
#else
				MEMORY_ALIGNED_FREE(decoder->RawBayer16);
				decoder->RawBayer16 = NULL;
				decoder->RawBayerSize = 0;
#endif
			}

#if _ALLOCATOR
			decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, orig_width * 64, alignment);
#else
			decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(orig_width * 64, alignment);
#endif
			assert(decoder->RawBayer16 != NULL);
			if (! (decoder->RawBayer16 != NULL)) {
				return CODEC_ERROR_MEMORY_ALLOC;
			}
			decoder->RawBayerSize = orig_width * 64;
		}
	}

	// unpack source original YUV into YU64?
	if(decoder->RawBayer16)
	{
#if _THREADED
		{
			WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
			if(decoder->worker_thread.pool.thread_count == 0)
			{
				CreateLock(&decoder->worker_thread.lock);
				// Initialize the pool of transform worker threads
				ThreadPoolCreate(&decoder->worker_thread.pool,
								 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
								 WorkerThreadProc,
								 decoder);
			}
#endif
			// Post a message to the mailbox
			mailbox->output = output_buffer;
			mailbox->pitch = output_pitch;
			memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
			mailbox->jobType = JOB_TYPE_OUTPUT_UNCOMPRESSED;

			// Set the work count to the number of rows to process
			ThreadPoolSetWorkCount(&decoder->worker_thread.pool, height);

			// Start the transform worker threads
			ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

			// Wait for all of the worker threads to finish
			ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
		}
#else
		{
			int orig_width = width;
			int orig_height = height;
			int row,lines = 1;
			int start,end;
			// Fixed: dst was used without a declaration in this
			// non-threaded path, which did not compile.
			uint8_t *dst = (uint8_t *)output_buffer;

			if(resolution == DECODED_RESOLUTION_HALF)
			{
				orig_width *= 2;
				orig_height *= 2;
				lines = 2;
			}
			if(resolution == DECODED_RESOLUTION_QUARTER)
			{
				orig_width *= 4;
				orig_height *= 4;
				lines = 4;
			}

			start = 0;
			end = height;
			if(format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24)	// Can this work, all the code below expects 10-bit
			{
				start = height-1;
				end = -1;
			}

			for (row = start; row != end; end > start ? row++ : row--)
			{
				int whitebitdepth = 16;
				int flags = 0;
				uint8_t *planar_output[3];
				int planar_pitch[3];
				ROI roi;
				PIXEL16U *y_row_ptr;
				PIXEL16U *u_row_ptr;
				PIXEL16U *v_row_ptr;
				PIXEL16U *scanline = (PIXEL16U *)decoder->RawBayer16;
				PIXEL16U *scanline2 = scanline + orig_width * 8;
				unsigned short *sptr;
				int i,unc_Stride = decoder->uncompressed_size / orig_height;

				whitebitdepth = 13;
				if(decoder->apply_color_active_metadata)
					flags = ACTIVEMETADATA_SRC_8PIXEL_PLANAR;
				else
					flags = 0;

				roi.width = width;
				roi.height = 1;

				if(lines == 1)
				{
					uint16_t *sptr;
					// Fixed: the original cast was (uint32_t), which
					// truncates the pointer to a 32-bit integer.
					uint32_t j,*lptr = (uint32_t *)decoder->uncompressed_chunk;
					PIXEL16U *ptr = (PIXEL16U *)scanline;
					lptr += row * (unc_Stride>>2);
					sptr = (uint16_t *)lptr;
					for(i=0; i<width;i+=8)
					{
						int val,r,g,b;
						if(flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR)
						{
							if(decoder->image_dev_only) // HACK, currently assuming RG48 input data.
							{
								for(j=0; j<8; j++)
								{
									ptr[j] = sptr[0] >> 3;
									ptr[j+8] = sptr[1] >> 3;
									ptr[j+16] = sptr[2] >> 3;
									sptr += 3;
								}
							}
							else
							{
								// Unpack big-endian 10-bit RGB into 13-bit planar groups of 8
								for(j=0; j<8; j++)
								{
									val = SwapInt32(*lptr++);
									val >>= 2;
									b = (val & 0x3ff) << 3;
									val >>= 10;
									g = (val & 0x3ff) << 3;
									val >>= 10;
									r = (val & 0x3ff) << 3;
									ptr[j] = r;
									ptr[j+8] = g;
									ptr[j+16] = b;
								}
							}
						}
						else
						{
							if(decoder->image_dev_only) // HACK, currently assuming RG48 input data.
							{
								for(j=0; j<8*3; j+=3)
								{
									ptr[j] = sptr[0] >> 3;
									ptr[j+1] = sptr[1] >> 3;
									ptr[j+2] = sptr[2] >> 3;
									sptr += 3;
								}
							}
							else
							{
								// Unpack big-endian 10-bit RGB into interleaved 13-bit RGB
								for(j=0; j<8*3; j+=3)
								{
									val = SwapInt32(*lptr++);
									val >>= 2;
									b = (val & 0x3ff) << 3;
									val >>= 10;
									g = (val & 0x3ff) << 3;
									val >>= 10;
									r = (val & 0x3ff) << 3;
									ptr[j] = r;
									ptr[j+1] = g;
									ptr[j+2] = b;
								}
							}
						}
						ptr += 24;
					}
				}
				else if(lines == 2)
				{
					// Fixed: pointer was cast to uint32_t (integer) in the
					// original; must be uint32_t *.
					uint32_t j,*lptr = (uint32_t *)decoder->uncompressed_chunk;
					PIXEL16U *ptr = (PIXEL16U *)scanline;
					lptr += row * (unc_Stride>>2) * lines;
					for(i=0; i<width;i+=8)
					{
						int val,r,g,b;
						for(j=0; j<8; j++)
						{
							// Average a 2x2 block of source pixels
							val = SwapInt32(lptr[0]);
							val >>= 2;
							b = (val & 0x3ff) << 3;
							val >>= 10;
							g = (val & 0x3ff) << 3;
							val >>= 10;
							r = (val & 0x3ff) << 3;

							val = SwapInt32(lptr[1]);
							val >>= 2;
							b += (val & 0x3ff) << 3;
							val >>= 10;
							g += (val & 0x3ff) << 3;
							val >>= 10;
							r += (val & 0x3ff) << 3;

							val = SwapInt32(lptr[unc_Stride>>2]);
							val >>= 2;
							b += (val & 0x3ff) << 3;
							val >>= 10;
							g += (val & 0x3ff) << 3;
							val >>= 10;
							r += (val & 0x3ff) << 3;

							val = SwapInt32(lptr[(unc_Stride>>2)+1]);
							val >>= 2;
							b += (val & 0x3ff) << 3;
							val >>= 10;
							g += (val & 0x3ff) << 3;
							val >>= 10;
							r += (val & 0x3ff) << 3;

							if(flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR)
							{
								ptr[j] = r>>2;
								ptr[j+8] = g>>2;
								ptr[j+16] = b>>2;
							}
							else
							{
								ptr[j*3] = r>>2;
								ptr[j*3+1] = g>>2;
								ptr[j*3+2] = b>>2;
							}
							lptr += lines;
						}
						ptr += 24;
					}
				}
				else if(lines == 4)
				{
					// Fixed: pointer was cast to uint32_t (integer) in the
					// original; must be uint32_t *.
					uint32_t j,*lptr = (uint32_t *)decoder->uncompressed_chunk;
					PIXEL16U *ptr = (PIXEL16U *)scanline;
					lptr += row * (unc_Stride>>2) * lines;
					for(i=0; i<width;i+=8)
					{
						int val,r,g,b;
						for(j=0; j<8; j++)
						{
							// Average four samples from a 4x4 source block
							val = SwapInt32(lptr[0]);
							val >>= 2;
							b = (val & 0x3ff) << 3;
							val >>= 10;
							g = (val & 0x3ff) << 3;
							val >>= 10;
							r = (val & 0x3ff) << 3;

							val = SwapInt32(lptr[2]);
							val >>= 2;
							b += (val & 0x3ff) << 3;
							val >>= 10;
							g += (val & 0x3ff) << 3;
							val >>= 10;
							r += (val & 0x3ff) << 3;

							val = SwapInt32(lptr[unc_Stride>>1]);
							val >>= 2;
							b += (val & 0x3ff) << 3;
							val >>= 10;
							g += (val & 0x3ff) << 3;
							val >>= 10;
							r += (val & 0x3ff) << 3;

							val = SwapInt32(lptr[(unc_Stride>>1)+2]);
							val >>= 2;
							b += (val & 0x3ff) << 3;
							val >>= 10;
							g += (val & 0x3ff) << 3;
							val >>= 10;
							r += (val & 0x3ff) << 3;

							if(flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR)
							{
								ptr[j] = r>>2;
								ptr[j+8] = g>>2;
								ptr[j+16] = b>>2;
							}
							else
							{
								ptr[j*3] = r>>2;
								ptr[j*3+1] = g>>2;
								ptr[j*3+2] = b>>2;
							}
							lptr += lines;
						}
						ptr += 24;
					}
				}

				sptr = scanline;
				if(decoder->apply_color_active_metadata)
					sptr = ApplyActiveMetaData(decoder, width, 1, row, scanline, scanline2,
											   info->format, &whitebitdepth, &flags);

				ConvertLinesToOutput(decoder, width, 1, row, sptr,
									 dst, output_pitch, format, whitebitdepth, flags);

				dst += output_pitch;
			}
		}
#endif
	}

	error = CODEC_ERROR_OKAY;
	return error;
}
// Reconstruct Bayer format to the requested output format
/*
	Dispatch reconstruction of a Bayer-encoded sample to the routine for
	the requested decoded resolution.  Only the full/half-horizontal
	demosaic resolutions are currently implemented; every other known
	resolution reports CODEC_ERROR_UNSUPPORTED_FORMAT, and an unknown
	resolution additionally trips an assertion in debug builds.
*/
CODEC_ERROR ReconstructSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	int resolution = info->resolution;

	// The demosaic paths are the only implemented reconstructions
	if (resolution == DECODED_RESOLUTION_FULL_DEBAYER ||
		resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
	{
		return ReconstructSampleFrameDeBayerFullToBuffer(decoder, info, frame, output, pitch);
	}

	switch (resolution)
	{
	case DECODED_RESOLUTION_FULL:
	case DECODED_RESOLUTION_HALF_NODEBAYER:
	case DECODED_RESOLUTION_HALF:
	case DECODED_RESOLUTION_QUARTER:
	case DECODED_RESOLUTION_LOWPASS_ONLY:
		// Recognized resolutions whose reconstruction is not implemented
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;

	default:
		// The decoded resolution is not supported by this routine
		assert(0);
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;
	}

	return error;
}
// Reconstruct Bayer encoded data to full resolution
/*
	Reconstruct Bayer encoded data to full resolution.

	Allocates the RawBayer16 scratch buffer (and the RGBFilterBuffer16
	filter buffer) sized from the decoded wavelet dimensions.  The actual
	conversions for RGB32/RGB24 are commented out, so every format
	currently returns CODEC_ERROR_UNSUPPORTED_FORMAT; only the buffer
	allocation side effects remain active.
*/
CODEC_ERROR ReconstructSampleFrameBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	int format = info->format;

	// Need to allocate a scratch buffer for decoding the Bayer frame?
	if (decoder->RawBayer16 == NULL)
	{
		TRANSFORM **transform_array = decoder->transform;
		int decoded_width = 0;
		int decoded_height = 0;
		int resolution = info->resolution;
		// Four Bayer data samples at each 2x2 quad in the grid
		int pixel_size = 4 * sizeof(PIXEL16U);
		int frame_size;
		const size_t alignment = 16;
#if _ALLOCATOR
		ALLOCATOR *allocator = decoder->allocator;
#endif
		// Compute the decoded width and height for the specified resolution
		GetDecodedFrameDimensions(transform_array, num_channels, frame, resolution, &decoded_width, &decoded_height);
		assert(decoded_width > 0 && decoded_height > 0);
		if (! (decoded_width > 0 && decoded_height > 0)) {
			return CODEC_ERROR_UNSUPPORTED_FORMAT;
		}

		frame_size = decoded_width * decoded_height * pixel_size;

#if _ALLOCATOR
		decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment);
#else
		decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment);
#endif
		assert(decoder->RawBayer16 != NULL);
		if (! (decoder->RawBayer16 != NULL)) {
			return CODEC_ERROR_MEMORY_ALLOC;
		}
		decoder->RawBayerSize = frame_size;

//#ifdef SHARPENING
		if(decoder->RGBFilterBuffer16 == NULL)
		{
			// Three planes of filtered RGB; four when alpha must be carried
			int size = frame_size*3;
			if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
				size = frame_size*4;

#if _ALLOCATOR
			decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16);
#else
			decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16);
#endif
			assert(decoder->RGBFilterBuffer16 != NULL);
			if (! (decoder->RGBFilterBuffer16 != NULL)) {
				return CODEC_ERROR_MEMORY_ALLOC;
			}
			// Record the actual allocation size (fixed: was frame_size*3
			// even when the four-plane alpha size was allocated)
			decoder->RGBFilterBufferSize = size;
		}
//#endif
	}

	//TODO: Need to add more output formats to this routine
	switch (format)
	{
	case DECODED_FORMAT_RGB32:
		// RGB32 conversion is not implemented (see the commented-out
		// TransformInverseSpatialToRow16u/ConvertPackedBayerToRGB32 calls)
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;

	case DECODED_FORMAT_RGB24:
		// RGB24 conversion is likewise not implemented
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;

	default:
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;
	}

	return error;
}
// Reconstruct Bayer encoded data and demosaic to full resolution
/*
	Reconstruct Bayer encoded data and demosaic to full resolution.

	Runs the threaded inverse spatial transform into the RawBayer16
	scratch buffer, then dispatches a JOB_TYPE_OUTPUT message to the
	worker-thread pool to demosaic into the caller's output buffer.
	Non-threaded builds report CODEC_ERROR_UNSUPPORTED_FORMAT.

	Returns CODEC_ERROR_OKAY on success, CODEC_ERROR_UNSUPPORTED_FORMAT
	for unsupported output formats, or CODEC_ERROR_MEMORY_ALLOC when a
	scratch buffer cannot be allocated.
*/
CODEC_ERROR ReconstructSampleFrameDeBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	int precision = codec->precision;
	int format = info->format;
	int width = info->width;

	// Compute the number of bytes between each row of Bayer data
	int bayer_pitch = 2 * width * sizeof(PIXEL16U);
	int chroma_offset = decoder->codec.chroma_offset;

	// Assume the format is unsupported until it is recognized below
	error = CODEC_ERROR_UNSUPPORTED_FORMAT;

	switch (format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB32:
	case DECODED_FORMAT_RG48:		//DAN20090120 added not sure why they weren't here.
	case DECODED_FORMAT_WP13:		//DAN20090120 ""
	case DECODED_FORMAT_B64A:
	case DECODED_FORMAT_R210:
	case DECODED_FORMAT_DPX0:
	case DECODED_FORMAT_RG30:
	case DECODED_FORMAT_AR10:
	case DECODED_FORMAT_AB10:
	case DECODED_FORMAT_YR16:
	case DECODED_FORMAT_V210:
	case DECODED_FORMAT_YU64:
		error = CODEC_ERROR_OKAY;
		break;
	}

	if(error)
		return error;

	// Need to allocate a scratch buffer for decoding the Bayer frame?
	if (decoder->RawBayer16 == NULL)
	{
		TRANSFORM **transform_array = decoder->transform;
		int decoded_width = 0;
		int decoded_height = 0;
		int resolution = info->resolution;
		// Four Bayer data samples at each 2x2 quad in the grid
		int pixel_size = 4 * sizeof(PIXEL16U);
		int frame_size;
		const size_t alignment = 16;
#if _ALLOCATOR
		ALLOCATOR *allocator = decoder->allocator;
#endif
		// Compute the decoded width and height for the specified resolution
		GetDecodedFrameDimensions(transform_array, num_channels, frame, resolution, &decoded_width, &decoded_height);
		assert(decoded_width > 0 && decoded_height > 0);
		if (! (decoded_width > 0 && decoded_height > 0)) {
			return CODEC_ERROR_UNSUPPORTED_FORMAT;
		}

		frame_size = decoded_width * decoded_height * pixel_size;

#if _ALLOCATOR
		decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment);
#else
		decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment);
#endif
		assert(decoder->RawBayer16 != NULL);
		if (! (decoder->RawBayer16 != NULL)) {
			return CODEC_ERROR_MEMORY_ALLOC;
		}
		decoder->RawBayerSize = frame_size;

//#ifdef SHARPENING
		if(decoder->RGBFilterBuffer16 == NULL)
		{
			// Three planes of filtered RGB; four when alpha must be carried
			int size = frame_size*3;
			if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
				size = frame_size*4;

#if _ALLOCATOR
			decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16);
#else
			decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16);
#endif
			assert(decoder->RGBFilterBuffer16 != NULL);
			if (! (decoder->RGBFilterBuffer16 != NULL)) {
				return CODEC_ERROR_MEMORY_ALLOC;
			}
			// Record the actual allocation size (fixed: was frame_size*3
			// even when the four-plane alpha size was allocated)
			decoder->RGBFilterBufferSize = size;
		}
//#endif
	}

#if _THREADED
	// NOTE(review): bayer_pitch is already in bytes (2 * width *
	// sizeof(PIXEL16U)), so the extra *sizeof(PIXEL) looks like a double
	// scaling -- confirm against TransformInverseSpatialUniversalThreadedToRow16u.
	TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
		(uint8_t *)decoder->RawBayer16, bayer_pitch*sizeof(PIXEL),
		info, chroma_offset, precision);

	//DemosaicRAW
	{
		WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
		int inverted = false;
		uint8_t *output = output_buffer;
		int pitch = output_pitch;

#if _DELAY_THREAD_START
		if(decoder->worker_thread.pool.thread_count == 0)
		{
			CreateLock(&decoder->worker_thread.lock);
			// Initialize the pool of transform worker threads
			ThreadPoolCreate(&decoder->worker_thread.pool,
							 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
							 WorkerThreadProc,
							 decoder);
		}
#endif
		// RGB24/RGB32 rows are stored bottom-up, so switch to the inverted
		// format and walk the output buffer upwards
		if (format == DECODED_FORMAT_RGB24)
		{
			format = DECODED_FORMAT_RGB24_INVERTED;
			inverted = true;
		}
		else if (format == DECODED_FORMAT_RGB32)
		{
			format = DECODED_FORMAT_RGB32_INVERTED;
			inverted = true;
		}

		// Have the output location and pitch been inverted?
		if (inverted && pitch > 0) {
			int height = info->height;
			if(info->resolution == DECODED_RESOLUTION_FULL_DEBAYER || info->resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
				height *= 2;
			output += (height - 1) * pitch;		// Start at the bottom row
			pitch = NEG(pitch);					// Negate the pitch to go up
		}

		// Post a message to the mailbox
		mailbox->output = output;
		mailbox->pitch = pitch;
		memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
		mailbox->jobType = JOB_TYPE_OUTPUT;

		// Set the work count to the number of rows to process
		ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);

		// Start the transform worker threads
		ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

		// Wait for all of the worker threads to finish
		ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
	}
#else
	error = CODEC_ERROR_UNSUPPORTED_FORMAT;
#endif

	return error;
}
// Reconstruct Bayer encoded data to half resolution
//
// Packs the lowpass band of the frame wavelet from each Bayer channel
// (G1, R-G difference, B-G difference, G2) into the output buffer.
// Only RGB32 output is currently supported; any other format returns
// CODEC_ERROR_UNSUPPORTED_FORMAT.
CODEC_ERROR ReconstructSampleFrameBayerHalfToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;

	// Validate the arguments before any member access
	if (decoder == NULL || info == NULL) {
		return CODEC_ERROR_INVALID_ARGUMENT;
	}

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	TRANSFORM **transform_array = decoder->transform;

	int frame_width = info->width;
	int frame_height = info->height;
	int format = info->format;

	PIXEL16U *g1_plane;
	PIXEL16U *rg_plane;
	PIXEL16U *bg_plane;
	PIXEL16U *g2_plane;

	int g1_pitch;
	int rg_pitch;
	int bg_pitch;
	int g2_pitch;

	// Get the lowpass bands in the wavelet corresponding to the output frame
	g1_plane = (PIXEL16U *)transform_array[0]->wavelet[frame]->band[0];
	rg_plane = (PIXEL16U *)transform_array[1]->wavelet[frame]->band[0];
	bg_plane = (PIXEL16U *)transform_array[2]->wavelet[frame]->band[0];

	// Half resolution decodes may not decode the G1-G2 channel (see the
	// original HACK note), so guard against a missing fourth transform
	// before dereferencing it
	if (transform_array[3] && transform_array[3]->wavelet[frame])
	{
		g2_plane = (PIXEL16U *)transform_array[3]->wavelet[frame]->band[0];
		g2_pitch = transform_array[3]->wavelet[frame]->pitch;
	}
	else
	{
		g2_plane = NULL;
		g2_pitch = 0;
	}

	// Get the pitch of each plane
	g1_pitch = transform_array[0]->wavelet[frame]->pitch;
	rg_pitch = transform_array[1]->wavelet[frame]->pitch;
	bg_pitch = transform_array[2]->wavelet[frame]->pitch;

	switch (format)
	{
	case DECODED_FORMAT_RGB32:
		ConvertPlanarBayerToRGB32(g1_plane, g1_pitch, rg_plane, rg_pitch,
								  bg_plane, bg_pitch, g2_plane, g2_pitch,
								  output_buffer, output_pitch,
								  frame_width, frame_height);
		break;

	default:
		// No conversion routine for this output format
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;
	}

	return error;
}
// Reconstruct Bayer encoded data to quarter resolution
//
// NOTE: this routine is not implemented.  It previously returned
// CODEC_ERROR_OKAY after a debug-only assert, which silently reported
// success in release builds even though nothing was written to the
// output buffer; it now returns an explicit error code.
CODEC_ERROR ReconstructSampleFrameBayerQuarterToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
	CODEC_ERROR error = CODEC_ERROR_UNSUPPORTED_FORMAT;

	// Validate the decoder argument before any member access
	if (decoder == NULL) {
		return CODEC_ERROR_INVALID_ARGUMENT;
	}

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	//TODO: Need to finish this routine
	assert(0);

	return error;
}
// Reconstruct the original YUV 4:2:2 encoded format to the requested output format
//
// Dispatches on decoded resolution (half versus full/half-horizontal) and on
// whether the source was progressive or interlaced, then selects the inverse
// transform and per-strip conversion routine for the requested output format.
// Returns CODEC_ERROR_UNSUPPORTED_FORMAT when no conversion path exists.
CODEC_ERROR ReconstructSampleFrameYUV422ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;

	// Validate the decoder argument before any member access.
	// (This check previously appeared after the decoder had already been
	// dereferenced to initialize the locals below, which made it useless.)
	if (decoder == NULL) {
		return CODEC_ERROR_INVALID_ARGUMENT;
	}

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	FRAME_INFO *info = &decoder->frame;
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	int progressive = codec->progressive;
	int precision = codec->precision;
	TRANSFORM **transform_array = decoder->transform;
	//int decoded_width = 0;
	//int decoded_height = 0;
	int resolution = info->resolution;
	int format = info->format;
	//int color_space = decoder->frame.colorspace;

	//TODO: Eliminate use of the chroma offset
	int chroma_offset = decoder->codec.chroma_offset;

#if _THREADED
	// Type of threaded inverse transform
	//int type;
#endif

#if _ALLOCATOR
	ALLOCATOR *allocator = decoder->allocator;
#endif

	//TODO: Split this routine into subroutines for progressive versus interlaced video
	//TODO: Split progressive and interlaced routines into subroutines for each resolution

	// Half resolution output comes directly from the frame wavelet lowpass bands
	if (resolution == DECODED_RESOLUTION_HALF)
	{
		bool inverted = false;
		FRAME_INFO info2;
		memcpy(&info2, info, sizeof(FRAME_INFO));

		// RGB24/RGB32 output is stored bottom-up, so decode into the inverted
		// variant and flip the output pointer and pitch below
		format = info2.format;
		if (format == DECODED_FORMAT_RGB24) {
			format = DECODED_FORMAT_RGB24_INVERTED;
			info2.format = format;
			inverted = true;
		}
		else if (format == DECODED_FORMAT_RGB32) {
			format = DECODED_FORMAT_RGB32_INVERTED;
			info2.format = format;
			inverted = true;
		}
#if 1
		// Have the output location and pitch been inverted?
		if (inverted && pitch > 0) {
			int height = info->height;
			output += (height - 1) * pitch;		// Start at the bottom row
			pitch = NEG(pitch);					// Negate the pitch to go up
		}
#endif
		if (decoder->use_active_metadata_decoder)
		{
#if _THREADED
			WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
			// Lazily create the worker thread pool on first use
			if (decoder->worker_thread.pool.thread_count == 0)
			{
				CreateLock(&decoder->worker_thread.lock);
				// Initialize the pool of transform worker threads
				ThreadPoolCreate(&decoder->worker_thread.pool,
								 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
								 WorkerThreadProc,
								 decoder);
			}
#endif
			// Post a message to the mailbox
			mailbox->output = output;
			mailbox->pitch = pitch;
			mailbox->framenum = frame;
			memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
			mailbox->jobType = JOB_TYPE_OUTPUT;

			decoder->RGBFilterBufferPhase = 1;

			// Set the work count to the number of rows to process
			ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
			// Start the transform worker threads
			ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
			// Wait for all of the worker threads to finish
			ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

			decoder->RGBFilterBufferPhase = 0;

			return CODEC_ERROR_OKAY;
#endif
		}
		else
		{
			// NOTE(review): these locals shadow the variables declared at the
			// top of the routine but refer to the same decoder state
			int precision = codec->precision;
			TRANSFORM **transform_array = decoder->transform;
			int channel;
			IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
			CODEC_STATE *codec = &decoder->codec;
			int num_channels = codec->num_channels;

			// Collect the frame wavelet from each channel
			for (channel = 0; channel < num_channels; channel++)
			{
				lowpass_images[channel] = transform_array[channel]->wavelet[frame];
			}

			CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, &info2, chroma_offset,
								   precision, decoder->codec.encoded_format, decoder->frame.white_point);
		}

		return CODEC_ERROR_OKAY;
	}

	// Was the video source interlaced or progressive?
	if (progressive)
	{
		// The video source was progressive (the first transform was a spatial transform)
		if (resolution == DECODED_RESOLUTION_FULL || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
		{
			FRAME_INFO info2;
			int format;
			bool inverted = false;
			int precision = codec->precision;

			memcpy(&info2, info, sizeof(FRAME_INFO));

			// RGB24/RGB32 output is stored bottom-up; use the inverted variants
			format = info2.format;
			if (format == DECODED_FORMAT_RGB24) {
				format = DECODED_FORMAT_RGB24_INVERTED;
				info2.format = format;
				inverted = true;
			}
			else if (format == DECODED_FORMAT_RGB32) {
				format = DECODED_FORMAT_RGB32_INVERTED;
				info2.format = format;
				inverted = true;
			}
#if 1
			// Have the output location and pitch been inverted?
			if (inverted && pitch > 0) {
				int height = info->height;
				output += (height - 1) * pitch;		// Start at the bottom row
				pitch = NEG(pitch);					// Negate the pitch to go up
			}
#endif
			/*if(decoder->use_active_metadata_decoder)
			{
				switch (format & 0x7ffffff)
				{
				case DECODED_FORMAT_RGB24:	// Output buffer is too small to decode into for
				case DECODED_FORMAT_YUYV:	// computing the active metadata.
				case DECODED_FORMAT_UYVY:
					return CODEC_ERROR_OKAY;
					break;
				}
			}*/

			// NOTE(review): this mask is 0x7ffffff (27 bits) while the RGB32
			// test further below uses 0x7FFFFFFF (31 bits) -- confirm which
			// flag bits are intended to be stripped
			switch (format & 0x7ffffff)
			{
			case DECODED_FORMAT_RGB24:	// Output buffer is too small to decode into for
				if (decoder->use_active_metadata_decoder)
				{
#if _THREADED
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch,
																	 info, chroma_offset, precision,
																	 InvertHorizontalStrip16sThruActiveMetadata);
					return CODEC_ERROR_OKAY;
#endif
				}
				else
				{
#if _THREADED
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch,
																	 info, chroma_offset, precision,
																	 InvertHorizontalStrip16sYUVtoRGB);
					return CODEC_ERROR_OKAY;
#endif
				}
				break;

			case DECODED_FORMAT_YUYV:
			case DECODED_FORMAT_UYVY:
				if (decoder->use_active_metadata_decoder)
				{
#if _THREADED
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch,
																	 info, chroma_offset, precision,
																	 InvertHorizontalStrip16sThruActiveMetadata);
					return CODEC_ERROR_OKAY;
#endif
				}
				else
				{
#if _THREADED
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch,
																	 info, chroma_offset, precision,
																	 InvertHorizontalStrip16sToYUV);
					return CODEC_ERROR_OKAY;
#endif
				}
				break;

			//Handle sizes that are smaller than the interim decode buffer //DAN20081222
			case DECODED_FORMAT_CbYCrY_10bit_2_8:
				// Split the output into upper and lower planes for the 2+8 format
				decoder->upper_plane = output;
				decoder->lower_plane = output + decoder->frame.width * decoder->frame.height / 2;

				// Use the address and pitch of the lower plane
				output = decoder->lower_plane;
				pitch = decoder->frame.width * 2;

				// Fall through and compute the inverse spatial transform

			case DECODED_FORMAT_CbYCrY_16bit_2_14:
			case DECODED_FORMAT_CbYCrY_16bit_10_6:
			case DECODED_FORMAT_CbYCrY_8bit:
			case DECODED_FORMAT_CbYCrY_16bit:
				if (decoder->use_active_metadata_decoder)
				{
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch,
																	 info, chroma_offset, precision,
																	 InvertHorizontalStrip16sThruActiveMetadata);
					return CODEC_ERROR_OKAY;
				}
				else
				{
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch,
																	 info, chroma_offset, precision,
																	 InvertHorizontalStrip16sToOutput);
					return CODEC_ERROR_OKAY;
				}
				break;

			case DECODED_FORMAT_V210:
				if (decoder->use_active_metadata_decoder)
				{
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch,
																	 info, chroma_offset, precision,
																	 InvertHorizontalStrip16sThruActiveMetadata);
					return CODEC_ERROR_OKAY;
				}
				else
				{
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch,
																	 info, chroma_offset, precision,
																	 InvertHorizontalYUVStrip16sToYUVOutput);
					return CODEC_ERROR_OKAY;
				}
				break;

			case DECODED_FORMAT_RGB32:
			case DECODED_FORMAT_RGB32_INVERTED:
			// As long as the outpitch is greater or equal to 4:2:2 16-bit YR16 this works.
			case DECODED_FORMAT_RG48:
			case DECODED_FORMAT_RG64:
			case DECODED_FORMAT_R210:
			case DECODED_FORMAT_DPX0:
			case DECODED_FORMAT_RG30:
			case DECODED_FORMAT_AR10:
			case DECODED_FORMAT_AB10:
			case DECODED_FORMAT_B64A:
			case DECODED_FORMAT_R408:
			case DECODED_FORMAT_V408:
			case DECODED_FORMAT_YU64:
			case DECODED_FORMAT_YR16:
			case DECODED_FORMAT_WP13:
			case DECODED_FORMAT_W13A:
				if ((format & 0x7FFFFFFF) == DECODED_FORMAT_RGB32 && decoder->use_active_metadata_decoder == false)
				{
#if _THREADED
					TransformInverseSpatialThreadedYUV422ToBuffer(decoder,
																  frame, num_channels, output, pitch,
																  &info2, chroma_offset, precision);
#elif 0
					TransformInverseSpatialToBuffer(decoder, transform_array, frame,
													num_channels, output, pitch,
													&info2, &decoder->scratch, chroma_offset, precision);
#else
					TransformInverseSpatialYUV422ToOutput(decoder, transform_array,
														  frame, num_channels, output, pitch,
														  &info2, &decoder->scratch, chroma_offset, precision,
														  InvertHorizontalStripYUV16sToPackedRGB32);
#endif
					return CODEC_ERROR_OKAY;
				}
#if _THREADED
				if (decoder->use_active_metadata_decoder)
				{
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch,
																	 info, chroma_offset, precision,
																	 InvertHorizontalStrip16sThruActiveMetadata);
					return CODEC_ERROR_OKAY;
				}
				else
				{
					// Decode to rows of intermediate 16-bit data in the output
					// buffer, then convert the rows in place to the output format
					TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame,
																	 num_channels, output, pitch,
																	 &info2, chroma_offset, precision);
					ConvertRow16uToOutput(decoder, frame, num_channels, output, pitch,
										  &info2, chroma_offset, precision);
					return CODEC_ERROR_OKAY;
				}
#endif
				break;

			default:
				if (decoder->use_active_metadata_decoder)
				{
#if _THREADED
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch,
																	 info, chroma_offset, precision,
																	 InvertHorizontalStrip16sThruActiveMetadata);
					return CODEC_ERROR_OKAY;
#endif
				}
				// else Return the error code for unsupported output format
				break;
			}
		}
	}
	else
	{
		// The video source was interlaced (the first transform was a frame transform)
		if (resolution == DECODED_RESOLUTION_FULL || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
		{
			bool inverted = false;

			if (format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24) {
				// info->format = DECODED_FORMAT_RGB32_INVERTED; //DAN20080702 vertically flips QT decodes if active.
				inverted = true;
			}
#if 1
			// Have the output location and pitch been inverted?
			if (inverted && pitch > 0) {
				int height = info->height;
				output += (height - 1) * pitch;		// Start at the bottom row
				pitch = NEG(pitch);					// Negate the pitch to go up
			}
#endif
			switch (format & 0x7ffffff)
			{
			case DECODED_FORMAT_NV12:
			case DECODED_FORMAT_RGB24:	// Output buffer is too small to decode into for
			case DECODED_FORMAT_YUYV:
			case DECODED_FORMAT_UYVY:
			case DECODED_FORMAT_V210:	// only supported with use_active_metadata_decoder
				if (decoder->use_active_metadata_decoder)
				{
					// Decode into an intermediate buffer sized for four bytes
					// per pixel, then use the worker threads to convert each
					// row to the requested output format
					int frame_size = info->width * info->height * 4;
					if (decoder->RGBFilterBuffer16 == NULL || decoder->RGBFilterBufferSize < frame_size)
					{
#if _ALLOCATOR
						if (decoder->RGBFilterBuffer16)
						{
							FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
							decoder->RGBFilterBuffer16 = NULL;
						}
						decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, frame_size, 16);
#else
						if (decoder->RGBFilterBuffer16)
						{
							MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
							decoder->RGBFilterBuffer16 = NULL;
						}
						decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16);
#endif
						assert(decoder->RGBFilterBuffer16 != NULL);
						if (! (decoder->RGBFilterBuffer16 != NULL)) {
							return CODEC_ERROR_MEMORY_ALLOC;
						}
						decoder->RGBFilterBufferSize = frame_size;
					}

					//TransformInverseSpatialUniversalThreadedToRow16u(
					//	decoder, frame, num_channels,
					//	(uint8_t *)decoder->RGBFilterBuffer16, info->width * 3 * 2,
					//	info, chroma_offset, precision);

#if _INTERLACED_WORKER_THREADS
					StartInterlaceWorkerThreads(decoder);

					//TODO: support new threading
					// Send the upper and lower rows of the transforms to the worker threads
					TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels,
														  (PIXEL16U *)decoder->RGBFilterBuffer16,
														  info->width * 4,
														  info, chroma_offset, precision);
#else
					// Transform the wavelets for each channel to the output image (not threaded)
					TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels,
												  (PIXEL16U *)decoder->RGBFilterBuffer16,
												  info->width * 4, info,
												  &decoder->scratch, chroma_offset, precision);
#endif

#if _THREADED
					{
						WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
						// Lazily create the worker thread pool on first use
						if (decoder->worker_thread.pool.thread_count == 0)
						{
							CreateLock(&decoder->worker_thread.lock);
							// Initialize the pool of transform worker threads
							ThreadPoolCreate(&decoder->worker_thread.pool,
											 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
											 WorkerThreadProc,
											 decoder);
						}
#endif
						// Post a message to the mailbox
						mailbox->output = output;
						mailbox->pitch = pitch;
						memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
						mailbox->jobType = JOB_TYPE_OUTPUT;

						decoder->RGBFilterBufferPhase = 2;	// yuv

						// Set the work count to the number of rows to process
						ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
						// Start the transform worker threads
						ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
						// Wait for all of the worker threads to finish
						ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

						decoder->RGBFilterBufferPhase = 0;
					}
#endif
					return CODEC_ERROR_OKAY;
				}
			}

			switch (format)
			{
			// As long as the outpitch is greater or equal to 4:2:2 16-bit YR16 this works.
			case DECODED_FORMAT_WP13: //DAN20110203 - missing
			case DECODED_FORMAT_W13A: //DAN20110203 - missing
			case DECODED_FORMAT_RG48:
			case DECODED_FORMAT_RG64:
			case DECODED_FORMAT_R210:
			case DECODED_FORMAT_DPX0:
			case DECODED_FORMAT_RG30:
			case DECODED_FORMAT_AR10:
			case DECODED_FORMAT_AB10:
			case DECODED_FORMAT_B64A:
			case DECODED_FORMAT_RGB32:	//32-bit format can fit the interim YR16 decode into
			case DECODED_FORMAT_R408:	//the output buffer
			case DECODED_FORMAT_V408:
			case DECODED_FORMAT_YU64:
			case DECODED_FORMAT_YR16:
#if _INTERLACED_WORKER_THREADS
				StartInterlaceWorkerThreads(decoder);

				//TODO: support new threading
				// Send the upper and lower rows of the transforms to the worker threads
				TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels,
													  (PIXEL16U *)output, pitch,
													  info, chroma_offset, precision);
				ConvertRow16uToOutput(decoder, frame, num_channels, output, pitch,
									  info, chroma_offset, precision);
#else
				// Transform the wavelets for each channel to the output image (not threaded)
				TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels,
											  (PIXEL16U *)output, pitch, info,
											  &decoder->scratch, chroma_offset, precision);
				ConvertRow16uToOutput(decoder, frame, num_channels, output, pitch,
									  info, chroma_offset, precision);

				//Old code converts 4:2:2 directly to RGBA (single threaded.)
				//TransformInverseFrameToBuffer(transform_array, frame, num_channels, output, pitch,
				//							  info, &decoder->scratch, chroma_offset, precision);
#endif
				return CODEC_ERROR_OKAY;

			default:
				// else Return the error code for unsupported output format
				break;
			}
		}
	}

	// The output format is not supported by this routine
	error = CODEC_ERROR_UNSUPPORTED_FORMAT;

	return error;
}
// Routines for converting the new encoded formats to the requested output format
//
// Reconstructs an RGB 4:4:4 encoded sample into the output buffer, selecting
// the conversion path by decoded resolution (lowpass-only, quarter, half, or
// full/half-horizontal) and by the requested output format.  This routine is
// only valid for progressive samples.
CODEC_ERROR ReconstructSampleFrameRGB444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;

	// Validate the decoder argument before any member access.
	// (This check previously appeared after the decoder had already been
	// dereferenced to initialize the locals below, which made it useless.)
	if (decoder == NULL) {
		return CODEC_ERROR_INVALID_ARGUMENT;
	}

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	FRAME_INFO *info = &decoder->frame;
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	//int progressive = codec->progressive;
	TRANSFORM **transform_array = decoder->transform;
	//IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
	//IMAGE *wavelet;
	//int wavelet_width;
	//int wavelet_height;
	int decoded_width = 0;
	int decoded_height = 0;
	int resolution = info->resolution;
	//int chroma_offset = decoder->codec.chroma_offset;
	//int decoded_scale;

#if _ALLOCATOR
	ALLOCATOR *allocator = decoder->allocator;
#endif

	//TODO: Eliminate use of the chroma offset

	// This routine should only be called for progressive frames
	assert(codec->progressive);

	// The decoder can decode a video sample without returning a frame
	if (output == NULL || pitch == 0) {
		return CODEC_ERROR_OKAY;
	}

	// Does this frame have to be reconstructed?
	if ((decoder->flags & DECODER_FLAGS_RENDER) == 0) {
		return CODEC_ERROR_OKAY;
	}

	// Check that the requested frame is within the limits of the group of frames
	assert(0 <= frame && frame < decoder->gop_length);

	// Check that the frame resolution is valid
	assert(IsValidFrameResolution(resolution));
	if (!IsValidFrameResolution(resolution)) {
		return CODEC_ERROR_RESOLUTION;
	}

	// Compute the decoded width and height
	ComputeOutputDimensions(decoder, frame, &decoded_width, &decoded_height);
	assert(decoded_width > 0 && decoded_height > 0);

	// RGB24/RGB32 output is stored bottom-up: start at the last row and
	// negate the pitch so the rows are written in reverse order
	if (info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32)
	{
		output += (info->height-1)*pitch;
		pitch = -pitch;
	}

#if (0 && DEBUG)
	if (logfile) {
		IMAGE *wavelet = transform[0]->wavelet[frame];
		int band = 0;
		fprintf(logfile, "Luminance wavelet, frame: %d, band: %d\n", frame, band);
		DumpArray16s("Lowpass Band", wavelet->band[band], wavelet->width, wavelet->height, wavelet->pitch, logfile);
	}
#endif

	// Check that the requested frame is large enough to hold the decoded frame
#if (0 && DEBUG)
	//if (! (info->width >= decoded_width))
	{
		if (logfile) {
			//fprintf(logfile, "Requested frame not large enough to hold decoded frame: %d < %d\n", info->width, decoded_width);
			fprintf(logfile, "Output frame width: %d, decoded frame width: %d\n", info->width, decoded_width);
		}
	}
#endif
	assert(info->width >= decoded_width);
	if (!(info->width >= decoded_width)) {
		return CODEC_ERROR_FRAMESIZE;
	}

//	assert((info->height+7)/8 >= (decoded_height+7)/8);
//	if (!(info->height+7)/8 >= (decoded_height+7)/8) {
//		return CODEC_ERROR_FRAMESIZE;
//	}

	START(tk_convert);

	if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
	{
		//int precision = codec->precision;
		int scale = 13;
		int channel;
		IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
		int chroma_offset = decoder->codec.chroma_offset;

		//DAN20081203 -- fix for 444 decodes in AE32-bit float
		decoder->frame.white_point = 16;
		//decoder->frame.signed_pixels = 0;

		// Use the smallest wavelet; fall back to the intra-frame wavelet
		// (with a smaller scale) if the group wavelet was not decoded
		for (channel = 0; channel < num_channels; channel++)
		{
			lowpass_images[channel] = transform_array[channel]->wavelet[5];
			if (lowpass_images[channel] == NULL) // therefore IntraFrame compressed.
			{
				scale = 12;
				lowpass_images[channel] = transform_array[channel]->wavelet[2];
			}
		}

		CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
							   scale, decoder->codec.encoded_format, decoder->frame.white_point);
	}
	else
	// Quarter resolution
	if (resolution == DECODED_RESOLUTION_QUARTER)
	{
		// Output quarter resolution for the two frame GOP
		int precision = codec->precision;

		// Reconstruct the frame to quarter resolution
		ReconstructQuarterFrame(decoder, num_channels, frame, output, pitch,
								info, &decoder->scratch, precision);

		// Quarter resolution one frame GOP is handled in DecodeSampleIntraFrame
	}
	else
	// Half resolution
	if (resolution == DECODED_RESOLUTION_HALF)
	{
		IMAGE *wavelet_array[TRANSFORM_MAX_CHANNELS];
		int precision = codec->precision;
		int chroma_offset = 0;
		int channel;

		if (decoder->use_active_metadata_decoder)
		{
#if _THREADED
			{
				WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
				// Lazily create the worker thread pool on first use
				if (decoder->worker_thread.pool.thread_count == 0)
				{
					CreateLock(&decoder->worker_thread.lock);
					// Initialize the pool of transform worker threads
					ThreadPoolCreate(&decoder->worker_thread.pool,
									 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
									 WorkerThreadProc,
									 decoder);
				}
#endif
				// Post a message to the mailbox
				mailbox->output = output;
				mailbox->pitch = pitch;
				mailbox->framenum = frame;
				memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
				mailbox->jobType = JOB_TYPE_OUTPUT;

				decoder->RGBFilterBufferPhase = 1;

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

				decoder->RGBFilterBufferPhase = 0;
			}
#endif
		}
		else
		{
			//DAN20081203 -- fix for 444 decodes in AE32-bit float
			decoder->frame.white_point = 16;
			//decoder->frame.signed_pixels = 0;

			// Get the first level wavelet in each channel
			for (channel = 0; channel < num_channels; channel++)
			{
				wavelet_array[channel] = transform_array[channel]->wavelet[frame];
			}

			// Pack the pixels from the lowpass band in each channel into the output buffer
			CopyLowpassRGB444ToBuffer(decoder, wavelet_array, num_channels, output, pitch,
									  info, chroma_offset, precision);
		}
	}
	// Full resolution or half horizontal
	else
	{
		int chroma_offset = 0;
		int precision = codec->precision;

		// Reconstruct the output frame from a full resolution decode
		//assert(resolution == DECODED_RESOLUTION_FULL);

		if (decoder->use_active_metadata_decoder)
		{
			// Decode to an intermediate 16-bit planar buffer, then let the
			// worker threads convert each row through the active metadata path
			int frame_size, channels = 3;

			if (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
				channels = 4;

			frame_size = info->width * info->height * channels * 2;

			if (decoder->RGBFilterBuffer16 == NULL || decoder->RGBFilterBufferSize < frame_size)
			{
#if _ALLOCATOR
				if (decoder->RGBFilterBuffer16)
				{
					FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
					decoder->RGBFilterBuffer16 = NULL;
				}
				decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, frame_size, 16);
#else
				if (decoder->RGBFilterBuffer16)
				{
					MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
					decoder->RGBFilterBuffer16 = NULL;
				}
				decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16);
#endif
				assert(decoder->RGBFilterBuffer16 != NULL);
				if (! (decoder->RGBFilterBuffer16 != NULL)) {
					return CODEC_ERROR_MEMORY_ALLOC;
				}
				decoder->RGBFilterBufferSize = frame_size;
			}

#if _THREADED
			TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
															 (uint8_t *)decoder->RGBFilterBuffer16, info->width * channels * 2,
															 info, chroma_offset, precision);
#else
			// Decode that last transform to rows of Bayer data (one row per channel)
			TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
											(uint8_t *)decoder->RGBFilterBuffer16, info->width * channels * 2,
											info, &decoder->scratch, chroma_offset, precision);
#endif

#if _THREADED
			{
				WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
				// Lazily create the worker thread pool on first use
				if (decoder->worker_thread.pool.thread_count == 0)
				{
					CreateLock(&decoder->worker_thread.lock);
					// Initialize the pool of transform worker threads
					ThreadPoolCreate(&decoder->worker_thread.pool,
									 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
									 WorkerThreadProc,
									 decoder);
				}
#endif
				// Post a message to the mailbox
				mailbox->output = output;
				mailbox->pitch = pitch;
				memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
				mailbox->jobType = JOB_TYPE_OUTPUT;

				decoder->RGBFilterBufferPhase = 1;

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

				decoder->RGBFilterBufferPhase = 0;
			}
#endif
		}
		else
		{
			//DAN20081203 -- fix for 444 decodes in AE32-bit float
			decoder->frame.white_point = 16;
			//decoder->frame.signed_pixels = 0;

			switch (info->format)
			{
			case DECODED_FORMAT_B64A:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																 output, pitch,
																 info, chroma_offset, precision,
																 InvertHorizontalStrip16sRGB2B64A);
#else
				TransformInverseRGB444ToB64A(transform_array, frame, num_channels, output, pitch,
											 info, &decoder->scratch, chroma_offset, precision);
#endif
				break;

			case DECODED_FORMAT_YU64:	//TODO : Threading
				TransformInverseRGB444ToYU64(transform_array, frame, num_channels, output, pitch,
											 info, &decoder->scratch, chroma_offset, precision);
				break;

			case DECODED_FORMAT_RGB24:
			case DECODED_FORMAT_RGB24_INVERTED:
			case DECODED_FORMAT_RGB32:
			case DECODED_FORMAT_RGB32_INVERTED:	//TODO, needs to be threaded. WIP
				TransformInverseRGB444ToRGB32(transform_array, frame, num_channels, output, pitch,
											  info, &decoder->scratch, chroma_offset, precision);
				break;

			case DECODED_FORMAT_RG48:
			case DECODED_FORMAT_RG64:	//TODO, needs to be threaded. WIP
				TransformInverseRGB444ToRGB48(transform_array, frame, num_channels, output, pitch,
											  info, &decoder->scratch, chroma_offset, precision);
				break;

			case DECODED_FORMAT_R210:
			case DECODED_FORMAT_DPX0:
			case DECODED_FORMAT_RG30:
			case DECODED_FORMAT_AR10:
			case DECODED_FORMAT_AB10:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																 output, pitch,
																 info, chroma_offset, precision,
																 InvertHorizontalStrip16sRGB2RG30);
#else
				TransformInverseRGB444ToRGB48(transform_array, frame, num_channels, output, pitch,
											  info, &decoder->scratch, chroma_offset, precision);
#endif
				break;

			case DECODED_FORMAT_YUYV:
			case DECODED_FORMAT_UYVY:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																 output, pitch,
																 info, chroma_offset, precision,
																 InvertHorizontalStrip16sRGB2YUV);
#else
				TransformInverseSpatialYUV422ToOutput(decoder, transform_array, frame, num_channels, output, pitch,
													  info, &decoder->scratch, chroma_offset, precision,
													  InvertHorizontalStripRGB16sToPackedYUV8u);
#endif
				break;

			case DECODED_FORMAT_R408:
			case DECODED_FORMAT_V408:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																 output, pitch,
																 info, chroma_offset, precision,
																 InvertHorizontalStrip16sRGBA2YUVA);
#else
				assert(0);	// missing non-threaded version
#endif
				break;

			case DECODED_FORMAT_YR16:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																 output, pitch,
																 info, chroma_offset, precision,
																 InvertHorizontalStrip16sRGB2YR16);
#else
				assert(0);	// missing non-threaded version
#endif
				break;

			case DECODED_FORMAT_V210:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																 output, pitch,
																 info, chroma_offset, precision,
																 InvertHorizontalStrip16sRGB2v210);
#else
				assert(0);	// missing non-threaded version
#endif
				break;

			case DECODED_FORMAT_CbYCrY_8bit:	// DECODED_FORMAT_CT_UCHAR
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																 output, pitch,
																 info, chroma_offset, precision,
																 InvertHorizontalStrip16sRGB2YUV);
#else
				assert(0);	// missing non-threaded version
#endif
				break;

			//TODO: Add code to handle other Avid pixel formats
			case DECODED_FORMAT_CbYCrY_16bit:		// DECODED_FORMAT_CT_SHORT
			case DECODED_FORMAT_CbYCrY_10bit_2_8:	// DECODED_FORMAT_CT_10Bit_2_8
			case DECODED_FORMAT_CbYCrY_16bit_2_14:	// DECODED_FORMAT_CT_SHORT_2_14
			case DECODED_FORMAT_CbYCrY_16bit_10_6:	// DECODED_FORMAT_CT_USHORT_10_6
				assert(0);
				break;

			default:
#if (1 && DEBUG)
				if (logfile) {
					fprintf(logfile, "Invalid decoded format: %d\n", info->format);
				}
#endif
				assert(0);
				error = CODEC_ERROR_INVALID_FORMAT;
				break;
			}
		}
	}

	STOP(tk_convert);

	return error;
}
// Convert 16-bit signed lowpass data into the requested output format
//
// Packs the lowpass (band 0) pixels of each channel wavelet in image_array
// into the output buffer, dispatching on info->format.  RGB and packed-YUV
// formats go through dedicated converters; every other format is converted
// one scanline at a time through the Convert*LinesToOutput path.
void CopyLowpassRGB444ToBuffer(DECODER *decoder, IMAGE *image_array[], int num_channels,
							   uint8_t *output_buffer, int32_t output_pitch,
							   FRAME_INFO *info, int chroma_offset,
							   int precision)
{
	bool inverted = false;
	int output_width = info->width;
	int output_height = info->height;
	int format = info->format;

	// Left shift to scale the pixels to 16 bits minus the shift already in the lowpass values
	const int shift = 16 - precision - PRESCALE_LUMA;

	START(tk_convert);

#if 0
	// Fill the output buffer with blank values
	EraseOutputBuffer(output_buffer, info->width, info->height, output_pitch, info->format);
#endif

	// Determine the type of conversion
	switch (info->format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB32:
		inverted = true;
		// fall through: RGB24/RGB32 use the same converter as the inverted
		// variants, with the inverted flag set
	case DECODED_FORMAT_RGB24_INVERTED:
	case DECODED_FORMAT_RGB32_INVERTED:
	case DECODED_FORMAT_B64A:
	case DECODED_FORMAT_R210:
	case DECODED_FORMAT_DPX0:
	case DECODED_FORMAT_RG30:
	case DECODED_FORMAT_AR10:
	case DECODED_FORMAT_AB10:
	case DECODED_FORMAT_RG48:
	case DECODED_FORMAT_RG64: //WIP
		ConvertLowpassRGB444ToRGB(image_array, output_buffer, output_width, output_height,
								  output_pitch, format, inverted, shift, num_channels);
		break;

	case DECODED_FORMAT_YUYV:
	case DECODED_FORMAT_UYVY:
		{
			// Channel order in image_array is G, R, B (RGB 4:4:4 encoding)
			IMAGE *g_image = image_array[0];
			IMAGE *r_image = image_array[1];
			IMAGE *b_image = image_array[2];

			// NOTE(review): this compares a DECODED_FORMAT value against
			// COLOR_FORMAT_YUYV/COLOR_FORMAT_UYVY -- presumably the two enum
			// families share values for these entries; confirm
			if (info->format == COLOR_FORMAT_YUYV)
			{
				ConvertRGB2YUV(r_image->band[0], g_image->band[0], b_image->band[0],
							   r_image->pitch, g_image->pitch, b_image->pitch,
							   output_buffer, output_pitch,
							   output_width, output_height, 14,
							   info->colorspace, info->format);
			}
			else if (info->format == COLOR_FORMAT_UYVY)
			{
				ConvertRGB2UYVY(r_image->band[0], g_image->band[0], b_image->band[0],
								r_image->pitch, g_image->pitch, b_image->pitch,
								output_buffer, output_pitch,
								output_width, output_height, 14,
								info->colorspace, info->format);
			}
		}
		break;

	default:
		// Generic path: copy each row into a planar scanline buffer and
		// convert it to the output format one line at a time
		{
			int y;
			IMAGE *g_image = image_array[0];
			IMAGE *r_image = image_array[1];
			IMAGE *b_image = image_array[2];
			IMAGE *a_image = image_array[3];
			// NOTE(review): assumes the scratch free space can hold at least
			// width * 4 shorts (R, G, B and optional A planes) -- TODO confirm
			unsigned short *scanline = (unsigned short *)decoder->scratch.free_ptr;
			//unsigned short *scanline2 = scanline + output_width*3;
			uint8_t *newline = (uint8_t *)output_buffer;
			unsigned short *Rptr,*Gptr,*Bptr,*Aptr = NULL;

			Rptr = (unsigned short *)r_image->band[0];
			Gptr = (unsigned short *)g_image->band[0];
			Bptr = (unsigned short *)b_image->band[0];

			if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
			{
				// Four-channel (RGBA) variant: also copy the alpha plane
				Aptr = (unsigned short *)a_image->band[0];
				for(y=0; y<output_height; y++)
				{
					int flags = (ACTIVEMETADATA_PLANAR);
					int whitebitdepth = 14;

					// Assemble one planar scanline: R, G, B, A segments
					memcpy(scanline, Rptr, info->width*2);
					memcpy(scanline+info->width, Gptr, info->width*2);
					memcpy(scanline+info->width*2, Bptr, info->width*2);
					memcpy(scanline+info->width*3, Aptr, info->width*2);

					// Advance each source plane by one row (pitch is in bytes)
					Rptr += r_image->pitch/2;
					Gptr += g_image->pitch/2;
					Bptr += b_image->pitch/2;
					Aptr += a_image->pitch/2;

					Convert4444LinesToOutput(decoder, info->width, 1, y, scanline,
											 newline, output_pitch, info->format, whitebitdepth, flags);

					newline += output_pitch;
				}
			}
			else
			{
				// Three-channel (RGB) variant
				for(y=0; y<output_height; y++)
				{
					int flags = (ACTIVEMETADATA_PLANAR);
					int whitebitdepth = 14;

					// Assemble one planar scanline: R, G, B segments
					memcpy(scanline, Rptr, info->width*2);
					memcpy(scanline+info->width, Gptr, info->width*2);
					memcpy(scanline+info->width*2, Bptr, info->width*2);

					// Advance each source plane by one row (pitch is in bytes)
					Rptr += r_image->pitch/2;
					Gptr += g_image->pitch/2;
					Bptr += b_image->pitch/2;

					ConvertLinesToOutput(decoder, info->width, 1, y, scanline,
										 newline, output_pitch, info->format, whitebitdepth, flags);

					newline += output_pitch;
				}
			}
		}
		//assert(0);
		break;
	}

	STOP(tk_convert);
}
#if _THREADED
// Threaded inverse spatial transform (new threads API): decodes YUV 4:2:2
// wavelet data into a packed RGB32 output buffer using the decoder's worker
// thread pool.
void TransformInverseSpatialThreadedYUV422ToBuffer(DECODER *decoder, int frame_index, int num_channels,
                                                   uint8_t *output, int pitch, FRAME_INFO *info,
                                                   int chroma_offset, int precision)
{
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    //TODO: Add support for more output formats
    int output_format = DECODED_FORMAT_RGB32;

    // Only the middle rows are shared between the upper and lower spatial transforms
    int half_transform_height = (((info->height + 7) / 8) * 8) / 2;
    int row_work_count = half_transform_height;

    // Mailbox used to hand the transform parameters to the worker threads
    WORKER_THREAD_DATA *message = &decoder->worker_thread.data;

    // Inverse horizontal filter that emits the requested output format
    HorizontalInverseFilterOutputProc filter_proc = NULL;

#if _DELAY_THREAD_START
    if (decoder->worker_thread.pool.thread_count == 0)
    {
        CreateLock(&decoder->worker_thread.lock);

        // Create the pool of transform worker threads on first use
        ThreadPoolCreate(&decoder->worker_thread.pool,
                         decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                         WorkerThreadProc,
                         decoder);
    }
#endif

    // Select the inverse horizontal filter for the output format
    if (output_format == DECODED_FORMAT_RGB32)
    {
        filter_proc = InvertHorizontalStripYUV16sToPackedRGB32;
    }
    else
    {
        // No filter available for this format
        assert(0);
        return;
    }

    // Fill in the mailbox for the worker threads
    message->horizontal_filter_proc = filter_proc;
    message->frame = frame_index;
    message->num_channels = num_channels;
    message->output = output;
    message->pitch = pitch;
    memcpy(&message->info, info, sizeof(FRAME_INFO));
    message->chroma_offset = chroma_offset;
    message->precision = precision;
    message->jobType = JOB_TYPE_WAVELET;

    // One work unit per row that must be processed
    ThreadPoolSetWorkCount(&decoder->worker_thread.pool, row_work_count);

    // Wake the worker threads and block until every row has been processed
    ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
    ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

#if (1 && DEBUG)
    if (logfile) {
        fprintf(logfile, "All worker threads signalled done\n");
    }
#endif
}
// Threaded inverse spatial transform (new threads API) that converts RGB,
// RGBA, or Bayer (four channel) decoded data into 16-bit planar rows.
void TransformInverseSpatialUniversalThreadedToRow16u(DECODER *decoder, int frame_index, int num_channels,
                                                      uint8_t *output, int pitch, FRAME_INFO *info,
                                                      int chroma_offset, int precision)
{
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    // The planar row output always uses the same inverse horizontal filter
    HorizontalInverseFilterOutputProc filter_proc = InvertHorizontalStrip16sToRow16uPlanar;

    // Only the middle rows are shared between the upper and lower spatial transforms
    int half_transform_height = (((info->height + 7) / 8) * 8) / 2;
    int row_work_count = half_transform_height;

    // Mailbox used to hand the transform parameters to the worker threads
    WORKER_THREAD_DATA *message = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
    if (decoder->worker_thread.pool.thread_count == 0)
    {
        CreateLock(&decoder->worker_thread.lock);

        // Create the pool of transform worker threads on first use
        ThreadPoolCreate(&decoder->worker_thread.pool,
                         decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                         WorkerThreadProc,
                         decoder);
    }
#endif

    // Fill in the mailbox for the worker threads
    message->horizontal_filter_proc = filter_proc;
    message->frame = frame_index;
    message->num_channels = num_channels;
    message->output = output;
    message->pitch = pitch;
    memcpy(&message->info, info, sizeof(FRAME_INFO));
    message->chroma_offset = chroma_offset;
    message->precision = precision;
    message->jobType = JOB_TYPE_WAVELET;

    // One work unit per row, then run the pool to completion
    ThreadPoolSetWorkCount(&decoder->worker_thread.pool, row_work_count);
    ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
    ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
// Threaded inverse spatial transform (new threads API) that sends the decoded
// rows through a caller-supplied inverse horizontal filter, so any output
// format with a matching filter routine can be produced.
void TransformInverseSpatialUniversalThreadedToOutput(
    DECODER *decoder, int frame_index, int num_channels,
    uint8_t *output, int pitch, FRAME_INFO *info,
    int chroma_offset, int precision,
    HorizontalInverseFilterOutputProc horizontal_filter_proc)
{
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    // Only the middle rows are shared between the upper and lower spatial transforms
    int row_work_count = (((info->height + 7) / 8) * 8) / 2;

    // Mailbox used to hand the transform parameters to the worker threads
    WORKER_THREAD_DATA *message = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
    if (decoder->worker_thread.pool.thread_count == 0)
    {
        CreateLock(&decoder->worker_thread.lock);

        // Create the pool of transform worker threads on first use
        ThreadPoolCreate(&decoder->worker_thread.pool,
                         decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                         WorkerThreadProc,
                         decoder);
    }
#endif

    // Fill in the mailbox for the worker threads
    message->horizontal_filter_proc = horizontal_filter_proc;
    message->frame = frame_index;
    message->num_channels = num_channels;
    message->output = output;
    message->pitch = pitch;
    memcpy(&message->info, info, sizeof(FRAME_INFO));
    message->chroma_offset = chroma_offset;
    message->precision = precision;
    message->jobType = JOB_TYPE_WAVELET;

    // One work unit per row, then run the pool to completion
    ThreadPoolSetWorkCount(&decoder->worker_thread.pool, row_work_count);
    ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
    ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
// Routines for the worker threads that use the new threads API

// Worker-thread body: inverse spatial (wavelet) transform for one section of
// the output frame.  Each pool thread calls this with its own thread_index:
//   - it carves its share out of the decoder scratch space and records the
//     four wavelet bands (and pitches) for every channel,
//   - the designated top thread processes row 0 with top-border filters,
//   - the designated bottom thread processes the last row with bottom-border
//     filters (when the last transform row is also a display row),
//   - all threads then pull middle-row work items from the pool until the
//     pool reports no more work, emitting two output lines per item (one for
//     the final item when the display height is odd).
// Output pixels are produced by horizontal_filter_proc, which performs the
// inverse horizontal filter and conversion to info->format.
void TransformInverseSpatialSectionToOutput(DECODER *decoder, int thread_index,
        int frame_index, int num_channels,
        uint8_t *output_buffer, int output_pitch, FRAME_INFO *info,
        int chroma_offset, int precision,
        HorizontalInverseFilterOutputProc horizontal_filter_proc)
{
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    TRANSFORM **transform = decoder->transform;
    const SCRATCH *scratch = &decoder->scratch;
    PIXEL *lowlow_band[CODEC_MAX_CHANNELS];
    PIXEL *lowhigh_band[CODEC_MAX_CHANNELS];
    PIXEL *highlow_band[CODEC_MAX_CHANNELS];
    PIXEL *highhigh_band[CODEC_MAX_CHANNELS];
    int lowlow_pitch[CODEC_MAX_CHANNELS];
    int lowhigh_pitch[CODEC_MAX_CHANNELS];
    int highlow_pitch[CODEC_MAX_CHANNELS];
    int highhigh_pitch[CODEC_MAX_CHANNELS];
    int channel_width[CODEC_MAX_CHANNELS];
    uint8_t *output_row_ptr;
    uint8_t *plane_array[TRANSFORM_MAX_CHANNELS];
    int plane_pitch[TRANSFORM_MAX_CHANNELS];
    int output_width = info->width;
    int output_height = info->height;
    int half_height = output_height/2;
    // NOTE(review): luma_band_width is assigned below but never read in this
    // routine -- candidate for removal; left untouched here.
    int luma_band_width;
    ROI strip;
    char *bufptr;
    int last_row;
    int last_display_row;
    int last_line;
    int channel;
    int row;
    int odd_display_lines = 0;
    THREAD_ERROR error;

    // Push the scratch space state to allocate a new section
    char *buffer = scratch->free_ptr;
    size_t buffer_size = scratch->free_size;

    //TODO: Replace uses of buffer variables with calls to the scratch space API

    // This version is for 16-bit pixels
    assert(sizeof(PIXEL) == 2);

    // Must have a valid inverse horizontal filter
    assert(horizontal_filter_proc != NULL);

    // Check for enough space in the local array allocations
    // assert(num_channels <= CODEC_NUM_CHANNELS);
    assert(num_channels <= TRANSFORM_MAX_CHANNELS);

    // Divide the buffer space between the four threads
    buffer_size /= decoder->worker_thread.pool.thread_count; // used to assume max of 4
    buffer += buffer_size * thread_index;

    // Round the buffer pointer up to the next cache line
    buffer_size -= (_CACHE_LINE_SIZE - ((uintptr_t)buffer & _CACHE_LINE_MASK));
    bufptr = (char *)ALIGN(buffer, _CACHE_LINE_SIZE);

    // Allocate buffer space for the output rows from each channel
    for (channel = 0; channel < num_channels; channel++)
    {
        // Get the row width for this channel
        IMAGE *wavelet = transform[channel]->wavelet[frame_index];
        int width = wavelet->width;
        int height = wavelet->height;
        //int pitch = wavelet->pitch;
        size_t channel_buffer_size;

        // Compute the width and pitch for the output rows stored in this buffer
        // (each wavelet row expands to twice its width, two rows at a time)
        int buffer_width = 2 * width;
        int buffer_height = 2;
        int buffer_pitch = ALIGN16(buffer_width);

        // Compute the total allocation for this channel
        channel_buffer_size = buffer_height * buffer_pitch;

        // Check that there is enough space available
        assert(channel_buffer_size <= buffer_size);

        // Allocate the buffer for this channel
        plane_array[channel] = (uint8_t *)bufptr;

        // Remember the pitch for rows in this channel
        plane_pitch[channel] = buffer_pitch;

        // Advance the buffer pointer past the allocated space for this channel
        bufptr += channel_buffer_size;

        // Reduce the amount of space remaining in the buffer
        buffer_size -= channel_buffer_size;

        // The dimensions of the output image are the same as the luma channel
        if (channel == 0)
        {
            strip.width = buffer_width;
            strip.height = buffer_height;
            last_row = height;
            //DAN20050606 Added to fix issue with non-div by 8 heihts.
            last_display_row = (info->height+1)/2; // DAN20090215 -- fix for odd display lines.
            odd_display_lines = info->height & 1;

            // Remember the width of the wavelet bands for luma
            luma_band_width = width;
        }

        // Save the bands per channel for routines that process all channels at once
        lowlow_band[channel] = wavelet->band[0];
        lowhigh_band[channel] = wavelet->band[1];
        highlow_band[channel] = wavelet->band[2];
        highhigh_band[channel] = wavelet->band[3];
        lowlow_pitch[channel] = wavelet->pitch;
        lowhigh_pitch[channel] = wavelet->pitch;
        highlow_pitch[channel] = wavelet->pitch;
        highhigh_pitch[channel] = wavelet->pitch;

        // Remember the width of the wavelet for this channel
        channel_width[channel] = width;
    }

    // Use the remaining buffer space for intermediate results
    buffer_size -= (_CACHE_LINE_SIZE - ((uintptr_t)bufptr & _CACHE_LINE_MASK));
    buffer = (char *)ALIGN(bufptr, _CACHE_LINE_SIZE);

    // The bottom row is handled separately when it is also a display row,
    // so exclude it from the middle-row range in that case
    if (last_row == last_display_row)
    {
        last_line = half_height - 1;
    }
    else
    {
        last_line = half_height;
    }
    if(odd_display_lines)
        last_line++;

    if (thread_index == TRANSFORM_WORKER_TOP_THREAD)
    {
        // Process the first row
        row = 0;
        output_row_ptr = output_buffer;
#if (0 && DEBUG)
        if (logfile) {
            fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row);
        }
#endif
        // Process the first row using special border filters for the top row
        InvertSpatialTopRow16sToOutput(decoder, thread_index, lowlow_band, lowlow_pitch,
                                       lowhigh_band, lowhigh_pitch,
                                       highlow_band, highlow_pitch,
                                       highhigh_band, highhigh_pitch,
                                       output_row_ptr, output_pitch,
                                       output_width, info->format, info->colorspace,
                                       row, channel_width,
                                       (PIXEL *)buffer, buffer_size,
                                       precision,
                                       horizontal_filter_proc);
    }

    // A single-thread pool must also cover the bottom-thread duties
    if (thread_index == TRANSFORM_WORKER_BOTTOM_THREAD || decoder->worker_thread.pool.thread_count == 1)
    {
        if(last_row == last_display_row) //DAN20071218 -- Added as old 1080 RAW files would crash
        {
            int pitch = output_pitch;

            // Process the last row
            row = last_row - 1;

            // 3D (stereo) decodes pack two channels into the frame, halving the pitch
            if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV) // 3d work
                if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC)
                    pitch >>= 1;

            // Begin filling the last output row with results
            output_row_ptr = output_buffer + row * 2 * pitch;
#if (0 && DEBUG)
            if (logfile) {
                fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row);
            }
#endif
            // Process the last row using special border filters for the bottom row
            if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV)
                if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC || decoder->channel_blend_type == BLEND_LINE_INTERLEAVED) // 3d Work TODO Fix
                    output_row_ptr -= output_pitch;

            InvertSpatialBottomRow16sToOutput(decoder, thread_index, lowlow_band, lowlow_pitch,
                                              lowhigh_band, lowhigh_pitch,
                                              highlow_band, highlow_pitch,
                                              highhigh_band, highhigh_pitch,
                                              output_row_ptr, output_pitch,
                                              output_width, info->format, info->colorspace,
                                              row, channel_width,
                                              (PIXEL *)buffer, buffer_size,
                                              precision, odd_display_lines,
                                              horizontal_filter_proc);
        }
    }

    // Loop until all of the middle rows have been processed
    for (;;)
    {
        int work_index;
        int row;

        // Wait for one row from each channel to process
        error = PoolThreadWaitForWork(&decoder->worker_thread.pool, &work_index, thread_index);

        // Is there another row to process?
        if (error == THREAD_ERROR_OKAY)
        {
            int pitch = output_pitch;

            // Compute the next row to process from the work index
            // (offset by one because row 0 is handled by the top thread)
            row = work_index + 1;

            if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV) // 3d work
                if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC) // stacked
                    pitch >>= 1;

            // Compute the output row corresponding to this row index
            output_row_ptr = output_buffer + row * 2 * pitch;
        }
        else
        {
            // No more work to do
            return;
        }

        // Is the row inside the top and bottom border?
        if (0 < row && row < last_line)
        {
            int outputlines = 2;
#if (0 && DEBUG)
            if (logfile) {
                fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row);
            }
#endif
            // The final middle row emits only one line when the display height is odd
            if(odd_display_lines && row==last_line-1)
            {
                outputlines = 1;
            }

            // Process the middle row using the normal wavelet filters
            InvertSpatialMiddleRow16sToOutput(decoder, thread_index,
                                              lowlow_band, lowlow_pitch,
                                              lowhigh_band, lowhigh_pitch,
                                              highlow_band, highlow_pitch,
                                              highhigh_band, highhigh_pitch,
                                              output_row_ptr, output_pitch,
                                              output_width, info->format, info->colorspace,
                                              row, channel_width,
                                              (PIXEL *)buffer, buffer_size,
                                              precision,
                                              horizontal_filter_proc,
                                              outputlines);
        }
    }
}
#endif //_THREADED
// Scan a sample bitstream for the tag/value tuple whose tag equals findtag.
// On success the 16-bit value is stored in *retvalue and true is returned.
// Returns false when a group/frame trailer is reached, the data is
// exhausted, an out-of-range chunk size is seen, or an unrecognized
// non-sized tag is encountered.  Sized chunks are skipped so only the
// top-level tuples are inspected.
bool GetTuplet(unsigned char *data, int datasize,
               unsigned short findtag, unsigned short *retvalue)
{
    bool ret = false;
    BITSTREAM myinput, *pinput;
    TAGVALUE segment;
    TAGWORD tag,value;
    int error = 0;
    //char t[100];

    // Wrap the caller's buffer in a bitstream for GetSegment()
    InitBitstream(&myinput);
    myinput.lpCurrentWord = data;
    myinput.nWordsUsed = datasize;
    pinput = &myinput;

    do
    {
        bool optional = false;
        int chunksize = 0;

        // Read the next tag value pair from the bitstream
        segment = GetSegment(pinput);
        tag = segment.tuple.tag;
        value = segment.tuple.value;

        // Is this an optional tag?  (optional tuples carry a negated tag)
        if (tag < 0)
        {
            tag = NEG(tag);
            optional = true;
        }

        // Decode the payload size for the sized-tag classes
        if(tag & 0x2000)
        {
            // 24-bit chunk size: the low byte of the tag holds the high bits
            chunksize = value;
            chunksize &= 0xffff;
            chunksize += ((tag&0xff)<<16);
        }
        else if(tag & 0x4000)
        {
            // 16-bit chunk size carried in the value field
            chunksize = value;
            chunksize &= 0xffff;
        }
        else if(tag == CODEC_TAG_INDEX)
        {
            chunksize = value;
            chunksize &= 0xffff;
        }
        else
        {
            // Ordinary tuple: no payload to skip
            chunksize = 0;
        }

        if((int)(tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || tag & 0x6000)
        {
            int skip = 1;
            error = 0;

            // Found the requested tag?  Return its value.
            if(tag == (int)findtag)
            {
                *retvalue = value;
                ret = true;
                break;
            }

            if((tag & 0xff00) == 0x2200) //sample size
            {
                chunksize = 0; // don't test against pinput->nWordsUsed, as we might be only reader enough for metadata only.
                skip = 0;
            }
            if((tag & 0xff00) == 0x2300) //uncompressed sample size
            {
                skip = 1;
            }
            if((tag & 0xff00) == 0x2100) //level
                skip = 0;

            if(chunksize)
            {
                // Stop if the chunk would run past the available data
                if(chunksize*4 > pinput->nWordsUsed || chunksize < 0)
                {
                    break;
                }
                if(skip)
                {
                    //unsigned int *iptr = (unsigned int *)pinput->lpCurrentWord;
                    // Advance past the chunk payload (chunksize 32-bit words)
                    pinput->lpCurrentWord += chunksize*4;
                    pinput->nWordsUsed -= chunksize*4;
                }
            }
        }
        else
        {
            // Unknown non-sized tag: abort the scan
            error = 1;
        }
    } while(tag != CODEC_TAG_GROUP_TRAILER &&
            tag != CODEC_TAG_FRAME_TRAILER &&
            pinput->nWordsUsed>0 && !error);

    return ret;
}
/*!
    Copied from metadata.cpp in the cedoc common directory

    Same scan as GetTuplet(), but returns the bitstream position just past
    the matched segment (and stores the value in *retvalue) instead of a
    boolean; returns NULL when the tag is not found or the input is empty.
*/
uint8_t *GetTupletAddr(uint8_t *data,
                       int datasize,
                       uint16_t findtag,
                       int16_t *retvalue)
{
    unsigned char *ret = NULL;
    BITSTREAM myinput, *pinput;
    TAGVALUE segment;
    TAGWORD tag,value;
    int error = 0;

    // Nothing to scan
    if (data == NULL || datasize == 0) {
        return NULL;
    }

    // Manual bitstream setup (instead of InitBitstream)
    //InitBitstream(&myinput);
    memset(&myinput, 0, sizeof(BITSTREAM));
    myinput.lpCurrentWord = data;
    myinput.nWordsUsed = datasize;
    myinput.nBitsFree = BITSTREAM_LONG_SIZE;
    pinput = &myinput;

    do
    {
        //BOOL optional = FALSE;
        bool optional = false;
        int chunksize = 0;

        // Read the next tag value pair from the bitstream
        segment = GetSegment(pinput);
        tag = segment.tuple.tag;
        value = segment.tuple.value;

        // Is this an optional tag?  (optional tuples carry a negated tag)
        if (tag < 0)
        {
            tag = NEG(tag);
            //optional = TRUE;
            optional = true;
        }

        // Decode the payload size for the sized-tag classes
        if(tag & 0x2000)
        {
            // 24-bit chunk size: the low byte of the tag holds the high bits
            chunksize = value;
            chunksize &= 0xffff;
            chunksize += ((tag&0xff)<<16);
        }
        else if(tag & 0x4000)
        {
            // 16-bit chunk size carried in the value field
            chunksize = value;
            chunksize &= 0xffff;
        }
        else if(tag == CODEC_TAG_INDEX)
        {
            chunksize = value;
            chunksize &= 0xffff;
        }
        else
        {
            // Ordinary tuple: no payload to skip
            chunksize = 0;
        }

        if((int)(tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || tag & 0x6000)
        {
            int skip = 1;
            error = 0;

            // Found the requested tag?  Return the current read position.
            if(tag == (int)findtag)
            {
                *retvalue = value;
                ret = pinput->lpCurrentWord;
                break;
            }

            if((tag & 0xff00) == 0x2200) //sample size
            {
                chunksize = 0; // don't test against pinput->nWordsUsed, as we might be only reader enough for metadata only.
                skip = 0;
            }
            if((tag & 0xff00) == 0x2300) //uncompressed sample size
            {
                skip = 1;
            }
            if((tag & 0xff00) == 0x2100) //level
                skip = 0;

            if(chunksize)
            {
                // Stop if the chunk would run past the available data
                if(chunksize*4 > pinput->nWordsUsed || chunksize < 0)
                {
                    break;
                }
                if(skip)
                {
                    //unsigned int *iptr = (unsigned int *)pinput->lpCurrentWord;
                    // Advance past the chunk payload (chunksize 32-bit words)
                    pinput->lpCurrentWord += chunksize*4;
                    pinput->nWordsUsed -= chunksize*4;
                }
            }
        }
        else
        {
            // Unknown non-sized tag: abort the scan
            error = 1;
        }
    } while(tag != CODEC_TAG_GROUP_TRAILER &&
            tag != CODEC_TAG_FRAME_TRAILER &&
            pinput->nWordsUsed>0 && !error);

    return ret;
}
|
GB_unop__identity_fp64_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp64_uint16)
// op(A') function: GB (_unop_tran__identity_fp64_uint16)
// C type: double
// A type: uint16_t
// cast: double cij = (double) aij
// unaryop: cij = aij
// Type- and operator-specific macros consumed by the shared kernel templates
// (e.g. GB_unop_transpose.c) and the apply loop below.

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    double z = (double) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = (double) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (0 here: the uint16_t -> double cast rules out the raw memcpy fast path)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the identity operator with a uint16_t -> double typecast to every
// entry of A (full/sparse case) or to every present entry (bitmap case).
// Because a cast is required, the GB_OP_IS_IDENTITY_WITH_NO_TYPECAST memcpy
// fast path is compiled out for this kernel.
GrB_Info GB (_unop_apply__identity_fp64_uint16)
(
    double *Cx,                 // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ;
#else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            double z = (double) aij ;
            Cx [p] = z ;
        }
#endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            double z = (double) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Transposes A while typecasting uint16_t entries to double.  The actual
// loop lives in the shared template GB_unop_transpose.c, which is driven by
// the GB_* macros defined earlier in this file.
GrB_Info GB (_unop_tran__identity_fp64_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_master.c | #include <stdio.h>
// Demonstrates the OpenMP `master` construct: the "1." line is printed
// exactly once (by the master thread), while the "2." line is printed by
// every thread in the team.
int main()
{
#pragma omp parallel
    {
#pragma omp master
        {
            // Executed by the master thread only.  Unlike `single`, the
            // `master` construct has no implied barrier on entry or exit.
            printf("1.Hello OpenMP\n");
        }
        printf("2.Hello OpenMP\n");
    }
    /* implicit barrier */
    return 0;
}
|
taskloop_nogroup_untied_scheduling.c | // RUN: %libomp-compile && env KMP_ABT_NUM_ESS=4 %libomp-run
// REQUIRES: abt && !clang
// Clang 10.0 seems ignoring the taskloop's "untied" attribute.
// We mark taskloop + untied with Clang as unsupported so far.
#include "omp_testsuite.h"
#include "bolt_scheduling_util.h"
// Verifies that untied tasks created by a `nogroup` taskloop keep being
// scheduled while the creating (master) thread leaves the loop region.
// Each round needs 8 participants (6 loop tasks + 2 worker threads) to pass
// through a barrier of width 4, which can only complete if the runtime runs
// the tasks concurrently with the threads.  Returns 1 on success, 0 on
// failure.
//
// NOTE(review): timeout_barrier_wait(&barrier, 4) comes from
// bolt_scheduling_util.h -- presumably it releases a group for every 4
// arrivals and fails on timeout; confirm against that header.
int test_taskloop_nogroup_untied_scheduling() {
  int i, vals[6];
  memset(vals, 0, sizeof(int) * 6);
  timeout_barrier_t barrier;
  timeout_barrier_init(&barrier);

  // Round 1: plain untied tasks.
  #pragma omp parallel num_threads(4)
  {
    // 6 barrier_waits in tasks and 2 barrier_waits in threads
    #pragma omp master
    {
      check_num_ess(4);
      #pragma omp taskloop grainsize(1) nogroup untied
      for (i = 0; i < 6; i++) {
        timeout_barrier_wait(&barrier, 4);
        vals[i] = 1;
      }
    }
    if (omp_get_thread_num() < 2) {
      // master does not wait the completion of taskloop.
      timeout_barrier_wait(&barrier, 4);
    }
  }

  // Round 2: identical, but each task yields first so the scheduler must
  // also cope with tasks suspending at an explicit yield point.
  #pragma omp parallel num_threads(4)
  {
    // 6 barrier_waits in tasks and 2 barrier_waits in threads
    #pragma omp master
    {
      check_num_ess(4);
      #pragma omp taskloop grainsize(1) nogroup untied
      for (i = 0; i < 6; i++) {
        #pragma omp taskyield
        timeout_barrier_wait(&barrier, 4);
        vals[i] = 1;
      }
    }
    if (omp_get_thread_num() < 2) {
      // master does not wait the completion of taskloop.
      timeout_barrier_wait(&barrier, 4);
    }
  }

  // Every loop iteration must have run exactly once.
  for (i = 0; i < 6; i++) {
    if (vals[i] != 1) {
      printf("vals[%d] == %d\n", i, vals[i]);
      return 0;
    }
  }
  return 1;
}
// Driver: repeats the scheduling test and reports the failure count as the
// process exit status (0 means every repetition passed).
int main() {
  int failures = 0;
  for (int rep = 0; rep < REPETITIONS; rep++) {
    if (!test_taskloop_nogroup_untied_scheduling())
      failures++;
  }
  return failures;
}
|
test-mempool.c | /* xZTL: Zone Translation Layer User-space Library
*
* Copyright 2019 Samsung Electronics
*
* Written by Ivan L. Picoli <i.picoli@samsung.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <omp.h>
#include <pthread.h>
#include <xztl-mempool.h>
#include <xztl.h>
#include <ztl-media.h>
#include "CUnit/Basic.h"
static const char **devname;
/* Assert that `ptr` is non-NULL; on failure, print the name of the API call
 * (`fn`) and the offending pointer for easier diagnosis.
 *
 * Fixes: `fn` is always a string literal at the call sites, so it is now
 * const-qualified; the gratuitous (uint64_t) cast is replaced by a plain
 * NULL comparison. */
static void cunit_mempool_assert_ptr(const char *fn, void *ptr) {
    CU_ASSERT(ptr != NULL);
    if (!ptr)
        printf("\n %s: ptr %p\n", fn, ptr);
}
/* Assert that `status` is zero (success); on failure, print the name of the
 * API call (`fn`) and the status code in hex.
 *
 * Fixes: `fn` is always a string literal at the call sites, so it is now
 * const-qualified. */
static void cunit_mempool_assert_int(const char *fn, int status) {
    CU_ASSERT(status == 0);
    if (status)
        printf(" %s: %x\n", fn, status);
}
/* CUnit suite setup hook: nothing to initialize, always succeeds. */
static int cunit_mempool_init(void) {
    return 0;
}
/* CUnit suite teardown hook: nothing to clean up, always succeeds. */
static int cunit_mempool_exit(void) {
    return 0;
}
/* Brings up the stack the remaining tests depend on, in order: register the
 * zoned media backend for the device, initialize the media layer, then the
 * mempool module under test.  Stops at the first failing step so later
 * steps are not attempted against an uninitialized layer. */
static void test_mempool_init(void) {
    int ret;

    ret = znd_media_register(*devname);
    cunit_mempool_assert_int("znd_media_register", ret);
    if (ret)
        return;

    ret = xztl_media_init();
    cunit_mempool_assert_int("xztl_media_init", ret);
    if (ret)
        return;

    cunit_mempool_assert_int("xztl_mempool_init", xztl_mempool_init());
}
/* Create a single MCMD mempool for thread id 0 with 32 entries of 1024
 * bytes each, asserting that creation succeeds. */
static void test_mempool_create(void) {
    const uint16_t type = XZTL_MEMPOOL_MCMD;
    const uint16_t tid = 0;
    const uint16_t ents = 32;
    const uint32_t ent_sz = 1024;

    int ret = xztl_mempool_create(type, tid, ents, ent_sz, NULL, NULL);
    cunit_mempool_assert_int("xztl_mempool_create", ret);
}
/* Destroy the MCMD mempool previously created for thread id 0. */
static void test_mempool_destroy(void) {
    const uint16_t type = XZTL_MEMPOOL_MCMD;
    const uint16_t tid = 0;

    int ret = xztl_mempool_destroy(type, tid);
    cunit_mempool_assert_int("xztl_mempool_destroy", ret);
}
/* Create one mempool per thread id (0..7) concurrently to exercise the
 * thread-safety of pool creation. */
static void test_mempool_create_mult(void) {
    const uint16_t type = XZTL_MEMPOOL_MCMD;
    const uint16_t ents = 32;
    const uint32_t ent_sz = 128;
    uint16_t tid;

#pragma omp parallel for
    for (tid = 0; tid < 8; tid++) {
        int ret = xztl_mempool_create(type, tid, ents, ent_sz, NULL, NULL);
        cunit_mempool_assert_int("xztl_mempool_create", ret);
    }
}
/* One full cycle over the pool: take `ents` entries, touch the opaque
 * payload of each (proves it is writable), then return them all.
 * Asserts on every get; CU_PASS records each successful put. */
static void mempool_cycle(uint16_t ents, uint32_t ent_sz,
                          struct xztl_mp_entry **ent) {
    uint16_t ent_i;

    /* Get entries */
    for (ent_i = 0; ent_i < ents; ent_i++) {
        ent[ent_i] = xztl_mempool_get(XZTL_MEMPOOL_MCMD, 0);
        cunit_mempool_assert_ptr("xztl_mempool_get", ent[ent_i]);
    }

    /* Modify entry bytes */
    for (ent_i = 0; ent_i < ents; ent_i++)
        memset(ent[ent_i]->opaque, 0x0, ent_sz);

    /* Put entries */
    for (ent_i = 0; ent_i < ents; ent_i++) {
        xztl_mempool_put(ent[ent_i], XZTL_MEMPOOL_MCMD, 0);
        CU_PASS("xztl_mempool_put");
    }
}

/* Exercises get/put on the thread-0 MCMD pool twice, verifying that
 * entries can be recycled after a full drain-and-refill of the pool.
 *
 * Fixes: the 3-loop get/memset/put sequence was duplicated verbatim; it is
 * now factored into mempool_cycle() and invoked twice. */
static void test_mempool_get_put(void) {
    uint16_t ents = 30;
    uint32_t ent_sz = 128;
    struct xztl_mp_entry *ent[30];

    mempool_cycle(ents, ent_sz, ent);

    /* Repeat the process to confirm returned entries are reusable */
    mempool_cycle(ents, ent_sz, ent);
}
/* Tears down the modules brought up by test_mempool_init, in reverse order:
 * first the mempool module, then the media layer. */
static void test_mempool_exit(void) {
    cunit_mempool_assert_int("xztl_mempool_exit", xztl_mempool_exit());
    cunit_mempool_assert_int("xztl_media_exit", xztl_media_exit());
}
/* Test driver: validates the device argument, registers the mempool test
 * suite with CUnit, runs it in verbose mode, and returns the number of
 * failed tests as the process exit code. */
int main(int argc, const char **argv) {
    if (argc < 2) {
        printf("Please provide the device path. e.g. liou:/dev/nvme0n2\n");
        return -1;
    }

    devname = &argv[1];
    printf("Device: %s\n", *devname);

    if (CUE_SUCCESS != CU_initialize_registry())
        return CU_get_error();

    CU_pSuite suite =
        CU_add_suite("Suite_mempool", cunit_mempool_init, cunit_mempool_exit);
    if (suite == NULL) {
        CU_cleanup_registry();
        return CU_get_error();
    }

    /* Register the tests in execution order; abort on the first
     * registration failure, exactly like the original ||-chain. */
    struct {
        const char *name;
        void (*func)(void);
    } tests[] = {
        {"Initialize", test_mempool_init},
        {"Create a mempool", test_mempool_create},
        {"Destroy a mempool", test_mempool_destroy},
        {"Create parallel mempools", test_mempool_create_mult},
        {"Get and put entries", test_mempool_get_put},
        {"Closes the module", test_mempool_exit},
    };
    size_t test_i;
    for (test_i = 0; test_i < sizeof(tests) / sizeof(tests[0]); test_i++) {
        if (CU_add_test(suite, tests[test_i].name, tests[test_i].func) == NULL) {
            CU_cleanup_registry();
            return CU_get_error();
        }
    }

    CU_basic_set_mode(CU_BRM_VERBOSE);
    CU_basic_run_tests();

    int failed = CU_get_number_of_tests_failed();
    CU_cleanup_registry();
    return failed;
}
|
original.c | #include <omp.h>
// File-scope loop indices shared by main() and compute(); every parallel
// region privatizes them with a private() clause.
int i, j, k;
// 2000 x 2000 operand matrices: main() fills a and b and zeroes c, then
// compute() accumulates c += a * b.
double a[2000UL][2000UL];
double b[2000UL][2000UL];
double c[2000UL][2000UL];
/*
 * Dense matrix multiply: c += a * b, parallelized over rows.
 *
 * Fixes:
 *  - Loop indices are now function-local (implicitly private), removing the
 *    fragile dependence on the file-scope i/j/k globals and the private()
 *    clause.
 *  - Loop order interchanged from i-j-k to i-k-j so b and c are walked
 *    row-wise (contiguous, cache-friendly).  For each (i,j) the additions
 *    over k still happen in increasing-k order, so the floating-point
 *    results are bit-identical to the original.
 */
void compute() {
#pragma omp parallel for shared(a, b, c)
    for (int row = 0; row < 2000; row++) {
        for (int inner = 0; inner < 2000; inner++) {
            // Hoist the a-element reused across the whole inner row of b
            double a_val = a[row][inner];
            for (int col = 0; col < 2000; col++) {
                c[row][col] += (a_val * b[inner][col]);
            }
        }
    }
}
/*
 * Initializes a[i][j] = i + j, b[i][j] = i * j, and c[i][j] = 0, then runs
 * compute() to form c = a * b.
 *
 * Fixes: the three separate parallel regions (one per matrix) are merged
 * into a single parallel initialization pass, eliminating two redundant
 * fork/join cycles and two extra sweeps over the index space; loop indices
 * are function-local, so no private() clause is needed.
 */
int main(int argc,char *argv[]) {
#pragma omp parallel for shared(a, b, c)
    for (int row = 0; row < 2000; row++) {
        for (int col = 0; col < 2000; col++) {
            a[row][col] = (row + col);
            b[row][col] = (row * col);
            c[row][col] = 0;
        }
    }
    compute();
    return 0;
}
|
hermv_c_bsr_u_lo_trans.c | #include<string.h>
#ifdef _OPENMP
#include<omp.h>
#endif
#include"alphasparse/opt.h"
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
// Hermitian sparse matrix-vector product for BSR storage:
//
//     y := beta * y + alpha * op(A) * x
//
// Per the kernel file name this is the conjugate-transpose operation on a
// unit-diagonal matrix whose lower triangle is stored (NOTE(review):
// confirm op(A) orientation against the dispatch tables).  What the code
// below demonstrably does:
//  - only blocks with block-column <= block-row are visited (lower_end);
//    inside the diagonal block only the strictly-lower entries are used;
//  - each stored entry contributes twice: in place via alpha_madde_2c
//    (presumably the conjugating multiply-accumulate -- confirm against the
//    kernel macros) and mirrored to the symmetric position via alpha_madde;
//  - the unit diagonal is applied as the alpha*x term in the final
//    reduction;
//  - every thread accumulates into a private full-length vector tmp[tid],
//    so the scattered mirrored updates need no atomics; the partial vectors
//    are summed in a second parallel pass.
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_BSR *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows * A->block_size;
    const ALPHA_INT n = A->cols * A->block_size;
    const ALPHA_INT bs = A->block_size;
    const ALPHA_INT bs2 = bs * bs;
    // assert(m==n);
    ALPHA_INT b_rows = A->rows;
    ALPHA_INT b_cols = A->cols;

    // A Hermitian matrix must be square (in blocks)
    if (b_rows != b_cols)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;

    ALPHA_INT thread_num = alpha_get_thread_num();
    ALPHA_INT partition[thread_num + 1];
    // Split the block rows across threads so each gets a similar nnz count
    balanced_partition_row_by_nnz(A->rows_end, b_rows, thread_num, partition);

    // One private accumulation vector per thread
    ALPHA_Number **tmp = (ALPHA_Number **)malloc(sizeof(ALPHA_Number *) * thread_num);
#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
    {
        const ALPHA_INT tid = alpha_get_thread_id();
        const ALPHA_INT local_m_s = partition[tid];
        const ALPHA_INT local_m_e = partition[tid + 1];
        tmp[tid] = (ALPHA_Number *)malloc(sizeof(ALPHA_Number) * b_rows * bs);
        memset(tmp[tid], 0, sizeof(ALPHA_Number) * b_rows * bs);
        if (A->block_layout == ALPHA_SPARSE_LAYOUT_ROW_MAJOR)
        {
            for (ALPHA_INT br = local_m_s; br < local_m_e; br++)
            {
                ALPHA_INT row = br * bs;
                ALPHA_INT block_start = A->rows_start[br], block_end = A->rows_end[br];
                // lower_end bounds the blocks with block-column <= br
                ALPHA_INT lower_end = alpha_upper_bound(&A->col_indx[block_start], &A->col_indx[block_end], br) - A->col_indx;
                for (ALPHA_INT ai = block_start; ai < lower_end; ai++)
                {
                    ALPHA_INT bc = A->col_indx[ai];
                    ALPHA_INT col = bc * bs;
                    ALPHA_INT a0_idx = ai * bs2;
                    // diagonal block containing diagonal entry
                    if (bc == br)
                    {
                        // Only the strictly-lower entries (b_col < b_row) are
                        // used; the unit diagonal is applied in the reduction
                        for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
                        {
                            //tmp[tid][b_row + row] += alpha*A->values[a0_idx + (b_row + 1) * bs]*x[col + b_col];
                            for (ALPHA_INT b_col = 0; b_col < b_row; b_col++)
                            {
                                alpha_madde_2c(tmp[tid][b_row + row], A->values[a0_idx + b_row * bs + b_col], x[col + b_col]);
                                alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_row * bs + b_col], x[row + b_row]);
                            }
                        }
                    }
                    else
                    {
                        // Off-diagonal block: every entry applied in place and
                        // mirrored to the symmetric position
                        for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
                        {
                            for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
                            {
                                alpha_madde_2c(tmp[tid][b_row + row], A->values[a0_idx + b_row * bs + b_col], x[col + b_col]);
                                alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_row * bs + b_col], x[row + b_row]);
                            }
                        }
                    }
                }
            }
        }
        else if (A->block_layout == ALPHA_SPARSE_LAYOUT_COLUMN_MAJOR)
        {
            // Same traversal with the in-block indexing transposed for
            // column-major block storage
            for (ALPHA_INT br = local_m_s; br < local_m_e; br++)
            {
                ALPHA_INT row = br * bs;
                ALPHA_INT block_start = A->rows_start[br], block_end = A->rows_end[br];
                ALPHA_INT lower_end = alpha_upper_bound(&A->col_indx[block_start], &A->col_indx[block_end], br) - A->col_indx;
                for (ALPHA_INT ai = block_start; ai < lower_end; ai++)
                {
                    ALPHA_INT bc = A->col_indx[ai];
                    ALPHA_INT col = bc * bs;
                    ALPHA_INT a0_idx = ai * bs2;
                    // diagonal block containing diagonal entry
                    if (bc == br)
                    {
                        for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
                        {
                            for (ALPHA_INT b_row = b_col + 1; b_row < bs; b_row++)
                            {
                                alpha_madde_2c(tmp[tid][b_row + row], A->values[a0_idx + b_col * bs + b_row], x[col + b_col]);
                                alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_col * bs + b_row], x[row + b_row]);
                            }
                        }
                    }
                    else
                    {
                        for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
                        {
                            for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
                            {
                                alpha_madde_2c(tmp[tid][b_row + row], A->values[a0_idx + b_col * bs + b_row], x[col + b_col]);
                                alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_col * bs + b_row], x[row + b_row]);
                            }
                        }
                    }
                }
            }
        }
    }

    // Reduce the per-thread partial vectors into y:
    //   y[i] = beta*y[i] + alpha*x[i] (unit diagonal) + alpha*sum_t tmp[t][i]
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < b_cols * bs; ++i)
    {
        ALPHA_Number tmp_y;
        alpha_setzero(tmp_y);
        for (ALPHA_INT j = 0; j < thread_num; ++j)
        {
            alpha_add(tmp_y, tmp_y, tmp[j][i]);
            //tmp_y += tmp[j][i];
        }
        alpha_mul(y[i], y[i], beta);
        alpha_madde(y[i], x[i], alpha);
        alpha_madde(y[i], tmp_y, alpha);
        //y[i] = y[i]*beta + tmp_y*alpha;
    }

    // Release the per-thread scratch vectors
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < thread_num; ++i)
    {
        free(tmp[i]);
    }
    free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
PixelBasedRenderFunc.h | #include"..\Core\SanTypes.h"
#include"SanGraphics.h"
namespace San
{
namespace Graphics
{
#ifndef __SANPIXELBASEDRENDERFUNC_H__
#define __SANPIXELBASEDRENDERFUNC_H__
//! Camera state consumed by the pixel-based 3D drawing helpers below
//! (UpdateCamera fills it in; GetScreenCoord/Draw*3D* read it).
//! NOTE(review): field semantics inferred from names and the helper
//! signatures in this header — confirm against the definitions.
struct PIXCAMERA
{
SVECTOR3 Coord;        // camera position (presumably world space — verify)
SVECTOR3 LookAt;       // point the camera is aimed at
SVECTOR3 Dir;          // view direction (likely derived by UpdateCamera — verify)
SVECTOR3 UpDir;        // camera up vector
sfloat Cutoff;         // assumed field-of-view / cutoff angle — TODO confirm
sfloat Near;           // near clip distance (assumption)
sfloat Far;            // far clip distance (assumption)
SVECTOR3 ScreenVec[4]; // four screen-corner vectors (presumably set by UpdateCamera)
};
// --- 2D framebuffer primitives (p_buffer is a width*height pixel array; exact
// --- channel layout per function name RGBA/RGB/Gray — definitions not visible here).
void ClearScreenRGBA(sfloat* p_buffer, const uint32 width, const uint32 height, const SANCOLORF &Color);
void DrawImageRGBA(sfloat* p_buffer, const uint32 buf_width, const uint32 buf_height, sfloat* p_img, const uint32 img_width, const uint32 img_height, const SVECTOR3 &pos, bool bUseAlpha = false);
void DrawImageRGB(sfloat* p_buffer, const uint32 buf_width, const uint32 buf_height, sfloat* p_img, const uint32 img_width, const uint32 img_height, const SVECTOR3 &pos);
void DrawImageGray(sfloat* p_buffer, const uint32 buf_width, const uint32 buf_height, sfloat* p_img, const uint32 img_width, const uint32 img_height, const SVECTOR3 &pos);
void DrawCrossRGBA(sfloat* p_buffer, const uint32 width, uint32 height, const SVECTOR3 pos, const SANCOLORF color, const uint32 size,const uint32 thickness);
void DrawDotRGBA(sfloat* p_buffer, const uint32 width, uint32 height, const SVECTOR3 pos, const SANCOLORF color, sfloat radius);
void DrawRectangleRGBA(sfloat* p_buffer, const uint32 buf_width, const uint32 buf_height, const uint32 quadr_width, const uint32 quadr_height, const SANCOLORF color, const SVECTOR3 &pos);
void DrawQuadrRGBA(sfloat* p_buffer, const uint32 buf_width, const uint32 buf_height, const uint32 quadr_width, const uint32 quadr_height, const SANCOLORF color, const SVECTOR3 &pos, const uint32 size);
void DrawLineRGBA(sfloat* p_buffer, const uint32 buf_width, const uint32 buf_height, const SVECTOR3 pos_begin, const SVECTOR3 pos_end, const SANCOLORF color, const uint32 size);
// --- Camera / projection helpers.
void UpdateCamera(PIXCAMERA &Camera, uint32 ScreenWidth, uint32 ScreenHeight);
SVECTOR3 GetScreenCoord(const SVECTOR3 Coord, const PIXCAMERA Camera, uint32 ScreenWidth, uint32 ScreenHeight);
// Ray/triangle intersection and barycentric-style interpolation helpers
// (presumably: pVec1..3 are triangle vertices — verify in the implementation).
sfloat GetHitPoint(const SVECTOR3* pRayCoord, const SVECTOR3* pRayDir, const SVECTOR3* pVec1, const SVECTOR3* pVec2, const SVECTOR3* pVec3, const SVECTOR3* pNormal, SVECTOR3* pHitPoint);
SVECTOR3 Interpolation(const SVECTOR3* pVecSrc, const SVECTOR3* pVec1, const SVECTOR3* pVec2, const SVECTOR3* pVec3);
// --- 3D overlays projected through a PIXCAMERA, plus a simple line-graph plotter.
void DrawDot3DRGBA(sfloat* p_buffer, const uint32 width, uint32 height, const SVECTOR3 pos, const SANCOLORF color, sfloat radius, const PIXCAMERA camera, const SVECTOR3 offset = SVECTOR3(0.0, 0.0, 0.0));
void DrawLine3DRGBA(sfloat* p_buffer, const uint32 buf_width, const uint32 buf_height, const SVECTOR3 coord_begin, const SVECTOR3 coord_end, const SANCOLORF color, const uint32 size, const PIXCAMERA camera, const SVECTOR3 offset = SVECTOR3(0.0, 0.0, 0.0));
void DrawLineGraph(sfloat* p_buffer, const uint32 buf_width, const uint32 buf_height, const uint32 dest_width, const uint32 dest_height, sfloat* p_dataset, uint32 data_size, const SVECTOR3 pos, const SANCOLORF color, const uint32 size);
template<class DType, class SType> void BufferCopy(DType* pDest, SType* pSrc, uint32 BufferSize, sfloat min = 0.0f, sfloat max = 255.0f)
{
#pragma omp parallel for
for (int32 seek = 0; seek < BufferSize; seek = seek + 1)
{
pDest[seek] = pSrc[seek]<min ? min : (pSrc[seek]>max ? max : pSrc[seek]);
}
}
#endif
}
} |
gather.c | #include <stdlib.h>
#include "gather.h"
/*
 * gather: sliding-window dot product (correlation of x with taps w).
 *
 *   output[i] += sum over j of w[j] * x[i + j],  for j in [0, C) with i + j < W
 *
 * Parameters:
 *   w      - C filter taps
 *   x      - W input samples
 *   C      - number of taps
 *   W      - number of input/output samples
 *   output - W accumulators; NOTE: this function ADDS into output, so the
 *            caller must initialize it (typically to zero) beforehand.
 *
 * Each output[i] is touched by exactly one OpenMP thread, so the parallel
 * loop is race-free.
 */
void gather(float* w, float* x, int C, int W, float* output) {
#pragma omp parallel for
    for (int i = 0; i < W; i++) {
        /* Taps with i + j >= W contributed exactly 0 in the original code;
         * clip the loop bound instead of testing inside the loop. */
        int taps = C;
        if (taps > W - i) {
            taps = W - i;
        }
        float acc = 0.0f;
        for (int j = 0; j < taps; j++) {
            acc += w[j] * x[i + j];
        }
        /* One read-modify-write of output[i] instead of C of them. */
        output[i] += acc;
    }
}
|
sageInterface.h | #ifndef ROSE_SAGE_INTERFACE
#define ROSE_SAGE_INTERFACE
#include "sage3basic.hhh"
#include <stdint.h>
#include <utility>
#include "rosePublicConfig.h" // for ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
#include "OmpAttribute.h"
#if 0 // FMZ(07/07/2010): the argument "nextErrorCode" should be call-by-reference
SgFile* determineFileType ( std::vector<std::string> argv, int nextErrorCode, SgProject* project );
#else
SgFile* determineFileType ( std::vector<std::string> argv, int& nextErrorCode, SgProject* project );
#endif
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
#include "rewrite.h"
#endif
// DQ (7/20/2008): Added support for unparsing arbitrary strings in the unparser.
#include "astUnparseAttribute.h"
#include <set>
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
#include "LivenessAnalysis.h"
#include "abstract_handle.h"
#include "ClassHierarchyGraph.h"
#endif
// DQ (8/19/2004): Moved from ROSE/src/midend/astRewriteMechanism/rewrite.h
//! A global function for getting the string associated with an enum (which is defined in global scope)
ROSE_DLL_API std::string getVariantName (VariantT v);
// DQ (12/9/2004): Qing, Rich and Dan have decided to start this namespace within ROSE
// This namespace is specific to interface functions that operate on the Sage III AST.
// The name was chosen so as not to conflict with other classes within ROSE.
// This will become the future home of many interface functions which operate on
// the AST and which are generally useful to users. As a namespace multiple files can be used
// to represent the compete interface and different developers may contribute interface
// functions easily.
// Constructor handling: (We have sageBuilder.h now for this purpose, Liao 2/1/2008)
// We could add simpler layers of support for construction of IR nodes by
// hiding many details in "makeSg***()" functions. Such functions would
// return pointers to the associated Sg*** objects and would be able to hide
// many IR specific details, including:
// memory handling
// optional parameter settings not often required
// use of Sg_File_Info objects (and setting them as transformations)
//
// namespace AST_Interface (this name is taken already by some of Qing's work :-)
//! An alias for Sg_File_Info::generateDefaultFileInfoForTransformationNode()
#define TRANS_FILE Sg_File_Info::generateDefaultFileInfoForTransformationNode()
/** Functions that are useful when operating on the AST.
*
* The Sage III IR design attempts to be minimalist. Thus additional functionality is intended to be presented using separate
* higher level interfaces which work with the IR. This namespace collects functions that operate on the IR and support
* numerous types of operations that are common to general analysis and transformation of the AST. */
namespace SageInterface
{
// Liao 6/22/2016: keep records of loop init-stmt normalization, later help undo it to support autoPar.
//! Book-keeping for for-loop init-statement normalization so the change can be
//! queried and undone later (added to support autoPar; see Liao's note above
//! in the original file).
struct Transformation_Record
{
// a lookup table to check if a for loop has been normalized for its c99-style init-stmt
std::map <SgForStatement* , bool > forLoopInitNormalizationTable;
// Detailed record about the original declaration (1st in the pair) and the normalization generated new declaration (2nd in the pair)
std::map <SgForStatement* , std::pair<SgVariableDeclaration*, SgVariableDeclaration*> > forLoopInitNormalizationRecord;
} ;
ROSE_DLL_API extern Transformation_Record trans_records;
// DQ (4/3/2014): Added general AST support separate from the AST.
// Container and API for analysis information that is outside of the AST and as a result
// prevents frequent modification of the IR.
class DeclarationSets
{
// DQ (4/3/2014): This stores all associated declarations as a map of sets.
// the key to the map is the first nondefining declaration and the elements of the set are
// all of the associated declarations (including the defining declaration).
private:
//! Map of first-nondefining declaration to all other associated declarations.
std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > declarationMap;
public:
//! Record decl under its first-nondefining declaration key
//! (definition lives in the .cpp — behavior per the class comment above).
void addDeclaration(SgDeclarationStatement* decl);
//! Return the set of declarations associated with decl
//! (presumably NULL when decl is unknown — verify in the implementation).
const std::set<SgDeclarationStatement*>* getDeclarations(SgDeclarationStatement* decl);
//! Direct access to the underlying map (non-const reference: callers can mutate it).
std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > & getDeclarationMap();
//! Query whether decl is located in a defining scope (semantics in the .cpp).
bool isLocatedInDefiningScope(SgDeclarationStatement* decl);
};
// DQ (4/3/2014): This constructs a data structure that holds analysis information about
// the AST that is separate from the AST. This is intended to be a general mechanism
// to support analysis information without constantly modifying the IR.
DeclarationSets* buildDeclarationSets(SgNode*);
//! An internal counter for generating unique SgName
ROSE_DLL_API extern int gensym_counter;
#ifdef ROSE_BUILD_BINARY_ANALYSIS_SUPPORT
//! Find the main interpretation.
SgAsmInterpretation* getMainInterpretation(SgAsmGenericFile* file);
//! Get the unsigned value of a disassembled constant.
uint64_t getAsmConstant(SgAsmValueExpression* e);
//! Get the signed value of a disassembled constant.
int64_t getAsmSignedConstant(SgAsmValueExpression *e);
#endif
//! Function to add "C" style comment to statement.
void addMessageStatement( SgStatement* stmt, std::string message );
//! A persistent attribute to represent a unique name for an expression.
//! Attached to AST nodes so the generated name survives across passes.
class UniqueNameAttribute : public AstAttribute
{
private:
//! The unique name associated with the annotated expression.
std::string name;
public:
//! Construct with an optional initial name (defaults to empty).
//! Const-reference parameter avoids the by-value std::string copy of the
//! original; the member initializer list replaces assign-in-body.
UniqueNameAttribute(const std::string & n="") : name(n) {}
//! Replace the stored name.
void set_name (const std::string & n) {name = n;}
//! Read the stored name; const-qualified so it is callable on const attributes.
std::string get_name () const {return name;}
};
// DQ (3/2/2009): Added support for collecting and merging the referenced symbols in the outlined
// function into the list used to edit the outlined code subtree to fixup references (from symbols
// in the original file to the symbols in the newer separate file).
// typedef rose_hash::unordered_map<SgNode*, SgNode*, hash_nodeptr> ReplacementMapType;
// void supplementReplacementSymbolMap ( const ReplacementMapTraversal::ReplacementMapType & inputReplacementMap );
// CH (4/9/2010): Use boost::hash instead
//#ifdef _MSC_VER
#if 0
inline size_t hash_value(SgNode* t) {return (size_t)t;}
#endif
#if 0
// DQ (8/3/2015): We expect that this is not used and is generating a warnings so we
// can best fix it by removing it.
struct hash_nodeptr
{
// CH (4/9/2010): Use boost::hash instead
//#ifndef _MSC_VER
#if 0
//rose_hash::hash<char*> hasher;
#endif
public:
size_t operator()(SgNode* node) const
{
// CH (4/9/2010): Use boost::hash instead
//#ifdef _MSC_VER
#if 0
return (size_t) hash_value(node);
#else
return (size_t) node;
#endif
}
};
#ifndef SWIG
// DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time).
void supplementReplacementSymbolMap ( rose_hash::unordered_map<SgNode*, SgNode*, hash_nodeptr> & inputReplacementMap );
#endif
#endif
//------------------------------------------------------------------------
//@{
/*! @name Symbol tables
\brief utility functions for symbol tables
*/
// Liao 1/22/2008, used for get symbols for generating variable reference nodes
// ! Find a variable symbol in current and ancestor scopes for a given name
ROSE_DLL_API SgVariableSymbol *lookupVariableSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL);
// DQ (8/21/2013): Modified to make newest function parameters be default arguments.
// DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments.
//! Find a symbol in current and ancestor scopes for a given variable name, starting from top of ScopeStack if currentscope is not given or NULL.
// SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL);
// SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList);
ROSE_DLL_API SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
// DQ (11/24/2007): Functions moved from the Fortran support so that they could be called from within astPostProcessing.
//!look up the first matched function symbol in parent scopes given only a function name, starting from top of ScopeStack if currentscope is not given or NULL
ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName, SgScopeStatement *currentScope=NULL);
// Liao, 1/24/2008, find exact match for a function
//!look up function symbol in parent scopes given both name and function type, starting from top of ScopeStack if currentscope is not given or NULL
ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName,
const SgType* t,
SgScopeStatement *currentScope=NULL);
ROSE_DLL_API SgFunctionSymbol *lookupTemplateFunctionSymbolInParentScopes (const SgName & functionName, SgFunctionType * ftype, SgTemplateParameterPtrList * tplparams, SgScopeStatement *currentScope=NULL);
ROSE_DLL_API SgFunctionSymbol *lookupTemplateMemberFunctionSymbolInParentScopes (const SgName & functionName, SgFunctionType * ftype, SgTemplateParameterPtrList * tplparams, SgScopeStatement *currentScope=NULL);
ROSE_DLL_API SgTemplateVariableSymbol * lookupTemplateVariableSymbolInParentScopes (const SgName & name, SgTemplateParameterPtrList * tplparams, SgTemplateArgumentPtrList* tplargs, SgScopeStatement *currentScope=NULL);
// DQ (8/21/2013): Modified to make newest function parameters be default arguments.
// DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments.
// DQ (5/7/2011): Added support for SgClassSymbol (used in name qualification support).
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
ROSE_DLL_API SgTypedefSymbol* lookupTypedefSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgNonrealSymbol* lookupNonrealSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
#if 0
// DQ (8/13/2013): This function does not make sense anymore, now that we have made the symbol
// table handling more precise and we have to provide template parameters for any template lookup.
// We also have to know if we want to lookup template classes, template functions, or template
// member functions (since each have specific requirements).
SgTemplateSymbol* lookupTemplateSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
#endif
#if 0
// DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes.
// Where these are called we might not know enough information about the template parameters or function
// types, for example.
SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
SgTemplateFunctionSymbol* lookupTemplateFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL);
SgTemplateMemberFunctionSymbol* lookupTemplateMemberFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL);
#endif
// DQ (8/21/2013): Modified to make some of the newest function parameters be default arguments.
// DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes.
ROSE_DLL_API SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList, SgScopeStatement *cscope = NULL);
ROSE_DLL_API SgEnumSymbol* lookupEnumSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgNamespaceSymbol* lookupNamespaceSymbolInParentScopes(const SgName & name, SgScopeStatement *currentScope = NULL);
// DQ (7/17/2011): Added function from cxx branch that I need here for the Java support.
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *cscope);
/*! \brief set_name of symbol in symbol table.
This function extracts the symbol from the relevant symbol table,
changes the name (at the declaration) and reinserts it into the
symbol table.
\internal I think this is what this function does, I need to double check.
*/
// DQ (12/9/2004): Moved this function (by Alin Jula) from being a member of SgInitializedName
// to this location where it can be a part of the interface for the Sage III AST.
ROSE_DLL_API int set_name (SgInitializedName * initializedNameNode, SgName new_name);
/*! \brief Output function type symbols in global function type symbol table.
*/
void outputGlobalFunctionTypeSymbolTable ();
// DQ (6/27/2005):
/*! \brief Output the local symbol tables.
\implementation Each symbol table is output with the file info where it is located in the source code.
*/
ROSE_DLL_API void outputLocalSymbolTables (SgNode * node);
class OutputLocalSymbolTables:public AstSimpleProcessing
{
public:
void visit (SgNode * node);
};
/*! \brief Regenerate the symbol table.
\implementation current symbol table must be NULL pointer before calling this
function (for safety, but is this a good idea?)
*/
// DQ (9/28/2005):
void rebuildSymbolTable (SgScopeStatement * scope);
/*! \brief Clear those variable symbols with unknown type (together with initialized names) which are also not referenced by any variable references or declarations under root. If root is NULL, all symbols with unknown type will be deleted.
*/
void clearUnusedVariableSymbols (SgNode* root = NULL);
// DQ (3/1/2009):
//! All the symbol table references in the copied AST need to be reset after rebuilding the copied scope's symbol table.
void fixupReferencesToSymbols( const SgScopeStatement* this_scope, SgScopeStatement* copy_scope, SgCopyHelp & help );
//@}
//------------------------------------------------------------------------
//@{
/*! @name Stringify
\brief Generate a useful string (name) to describe a SgNode
*/
/*! \brief Generate a useful name to describe the SgNode
\internal default names are used for SgNode objects that can not be associated with a name.
*/
// DQ (9/21/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgNode * node);
/*! \brief Generate a useful name to describe the declaration
\internal default names are used for declarations that can not be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgStatement * stmt);
/*! \brief Generate a useful name to describe the expression
\internal default names are used for expressions that can not be associated with a name.
*/
std::string get_name (const SgExpression * expr);
/*! \brief Generate a useful name to describe the declaration
\internal default names are used for declarations that can not be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgDeclarationStatement * declaration);
/*! \brief Generate a useful name to describe the scope
\internal default names are used for scope that cannot be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgScopeStatement * scope);
/*! \brief Generate a useful name to describe the SgSymbol
\internal default names are used for SgSymbol objects that cannot be associated with a name.
*/
// DQ (2/11/2007): Added this function to make debugging support more complete (useful for symbol table debugging support).
std::string get_name (const SgSymbol * symbol);
/*! \brief Generate a useful name to describe the SgType
\internal default names are used for SgType objects that cannot be associated with a name.
*/
std::string get_name (const SgType * type);
/*! \brief Generate a useful name to describe the SgSupport IR node
*/
std::string get_name (const SgSupport * node);
/*! \brief Generate a useful name to describe the SgLocatedNodeSupport IR node
*/
std::string get_name (const SgLocatedNodeSupport * node);
/*! \brief Generate a useful name to describe the SgC_PreprocessorDirectiveStatement IR node
*/
std::string get_name ( const SgC_PreprocessorDirectiveStatement* directive );
/*! \brief Generate a useful name to describe the SgToken IR node
*/
std::string get_name ( const SgToken* token );
// DQ (3/20/2016): Added to refactor some of the DSL infrastructure support.
/*! \brief Generate a useful name to support construction of identifiers from declarations.
This function permits names to be generated that will be unique across translation units
(a specific requirement different from the context of the get_name() functions above).
\internal This supports only a restricted set of declarations presently.
*/
std::string generateUniqueNameForUseAsIdentifier ( SgDeclarationStatement* declaration );
std::string generateUniqueNameForUseAsIdentifier_support ( SgDeclarationStatement* declaration );
/*! \brief Global map of name collisions to support generateUniqueNameForUseAsIdentifier() function.
*/
extern std::map<std::string,int> local_name_collision_map;
extern std::map<std::string,SgNode*> local_name_to_node_map;
extern std::map<SgNode*,std::string> local_node_to_name_map;
/*! \brief Traversal to set the global map of names to node and node to names.collisions to support generateUniqueNameForUseAsIdentifier() function.
*/
void computeUniqueNameForUseAsIdentifier( SgNode* astNode );
/*! \brief Reset map variables used to support generateUniqueNameForUseAsIdentifier() function.
*/
void reset_name_collision_map();
//@}
//------------------------------------------------------------------------
//@{
/*! @name Class utilities
\brief
*/
/*! \brief Get the default destructor from the class declaration
*/
// DQ (6/21/2005): Get the default destructor from the class declaration
SgMemberFunctionDeclaration *getDefaultDestructor (SgClassDeclaration *
classDeclaration);
/*! \brief Get the default constructor from the class declaration
*/
// DQ (6/22/2005): Get the default constructor from the class declaration
ROSE_DLL_API SgMemberFunctionDeclaration *getDefaultConstructor (SgClassDeclaration *
classDeclaration);
/*! \brief Return true if template definition is in the class, false if outside of class.
*/
// DQ (8/27/2005):
bool templateDefinitionIsInClass (SgTemplateInstantiationMemberFunctionDecl
* memberFunctionDeclaration);
/*! \brief Generate a non-defining (forward) declaration from a defining function declaration.
\internal should put into sageBuilder ?
*/
// DQ (9/17/2005):
SgTemplateInstantiationMemberFunctionDecl*
buildForwardFunctionDeclaration
(SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation);
//! Check if a SgNode is a declaration for a structure
bool isStructDeclaration(SgNode * node);
//! Check if a SgNode is a declaration for a union
bool isUnionDeclaration(SgNode * node);
#if 0
// DQ (8/28/2005): This is already a member function of the SgFunctionDeclaration
// (so that it can handle template functions and member functions)
/*! \brief Return true if member function of a template member function,
of false if a non-template member function in a templated class.
*/
// DQ (8/27/2005):
bool isTemplateMemberFunction (SgTemplateInstantiationMemberFunctionDecl *
memberFunctionDeclaration);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name Misc.
\brief Not sure the classifications right now
*/
//! Save AST into a pdf file. Start from a node to find its enclosing file node. The entire file's AST will be saved into a pdf.
void saveToPDF(SgNode* node, std::string filename);
void saveToPDF(SgNode* node); // enable calling from gdb
// DQ (2/12/2012): Added some diagnostic support.
//! Diagnostic function for tracing back through the parent list to understand at runtime where in the AST a failure happened.
void whereAmI(SgNode* node);
//! Extract a SgPragmaDeclaration's leading keyword . For example "#pragma omp parallel" has a keyword of "omp".
std::string extractPragmaKeyword(const SgPragmaDeclaration *);
//! Check if a node is SgOmp*Statement
ROSE_DLL_API bool isOmpStatement(SgNode* );
/*! \brief Return true if function is overloaded.
*/
// DQ (8/27/2005):
bool isOverloaded (SgFunctionDeclaration * functionDeclaration);
// DQ (2/14/2012): Added support function used for variable declarations in conditionals.
//! Support function used for variable declarations in conditionals
void initializeIfStmt(SgIfStmt *ifstmt, SgStatement* conditional, SgStatement * true_body, SgStatement * false_body);
//! Support function used for variable declarations in conditionals
void initializeSwitchStatement(SgSwitchStatement* switchStatement,SgStatement *item_selector,SgStatement *body);
//! Support function used for variable declarations in conditionals
void initializeWhileStatement(SgWhileStmt* whileStatement, SgStatement * condition, SgStatement *body, SgStatement *else_body);
//! Generate unique names for expressions and attach the names as persistent attributes ("UniqueNameAttribute")
void annotateExpressionsWithUniqueNames (SgProject* project);
//! Check if a SgNode is a main() function declaration
ROSE_DLL_API bool isMain (const SgNode* node);
// DQ (6/22/2005):
/*! \brief Generate unique name from C and C++ constructs. The name may contain space.
This is support for the AST merge, but is generally useful as a more general mechanism than
name mangling which is more closely ties to the generation of names to support link-time function name
resolution. This is more general than common name mangling in that it resolves more relevant differences
between C and C++ declarations. (e.g. the type within the declaration: "struct { int:8; } foo;").
\implementation current work does not support expressions.
*/
std::string generateUniqueName ( const SgNode * node, bool ignoreDifferenceBetweenDefiningAndNondefiningDeclarations);
/** Generate a name like __temp#__ that is unique in the current scope and any parent and children scopes. # is a unique integer counter.
* @param baseName the word to be included in the variable names. */
std::string generateUniqueVariableName(SgScopeStatement* scope, std::string baseName = "temp");
// DQ (8/10/2010): Added const to first parameter.
// DQ (3/10/2007):
//! Generate a unique string from the source file position information
std::string declarationPositionString (const SgDeclarationStatement * declaration);
// DQ (1/20/2007):
//! Added mechanism to generate project name from list of file names
ROSE_DLL_API std::string generateProjectName (const SgProject * project, bool supressSuffix = false );
//! Given a SgExpression that represents a named function (or bound member
//! function), return the mentioned function
SgFunctionDeclaration* getDeclarationOfNamedFunction(SgExpression* func);
//! Get the mask expression from the header of a SgForAllStatement
SgExpression* forallMaskExpression(SgForAllStatement* stmt);
//! Find all SgPntrArrRefExp under astNode, then add SgVarRefExp (if any) of SgPntrArrRefExp's dim_info into NodeList_t
void addVarRefExpFromArrayDimInfo(SgNode * astNode, Rose_STL_Container<SgNode *>& NodeList_t);
// DQ (10/6/2006): Added support for faster mangled name generation (caching avoids recomputation).
/*! \brief Support for faster mangled name generation (caching avoids recomputation).
*/
#ifndef SWIG
// DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time).
void clearMangledNameCache (SgGlobal * globalScope);
void resetMangledNameCache (SgGlobal * globalScope);
#endif
std::string getMangledNameFromCache (SgNode * astNode);
std::string addMangledNameToCache (SgNode * astNode, const std::string & mangledName);
SgDeclarationStatement * getNonInstantiatonDeclarationForClass (SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation);
//! a better version for SgVariableDeclaration::set_baseTypeDefininingDeclaration(), handling all side effects automatically
//! Used to have a struct declaration embedded into a variable declaration
void setBaseTypeDefiningDeclaration(SgVariableDeclaration* var_decl, SgDeclarationStatement *base_decl);
// DQ (10/14/2006): This function tests the AST to see if for a non-defining declaration, the
// bool declarationPreceedsDefinition ( SgClassDeclaration* classNonDefiningDeclaration, SgClassDeclaration* classDefiningDeclaration );
//! Check if a defining declaration comes before of after the non-defining declaration.
bool declarationPreceedsDefinition (SgDeclarationStatement *nonDefiningDeclaration, SgDeclarationStatement *definingDeclaration);
// DQ (10/19/2006): Function calls have interesting context dependent rules to determine if
// they are output with a global qualifier or not. Where this is true we have to avoid global
// qualifiers, since the function's scope has not been defined. This is an example of where
// qualification of function names in function calls are context dependent; an interesting
// example of where the C++ language is not friendly to source-to-source processing :-).
bool functionCallExpressionPreceedsDeclarationWhichAssociatesScope (SgFunctionCallExp * functionCall);
/*! \brief Compute the intersection set for two ASTs.
This is part of a test done by the copy function to compute those IR nodes in the copy that still reference the original AST.
*/
ROSE_DLL_API std::vector < SgNode * >astIntersection (SgNode * original, SgNode * copy, SgCopyHelp * help = NULL);
//! Deep copy an arbitrary subtree
ROSE_DLL_API SgNode* deepCopyNode (const SgNode* subtree);
//! Typed deep copy of a subtree. Delegates to the untyped deepCopyNode() and
//! hands the clone back with the caller's static type; this is also the
//! building block for the specialized wrappers (e.g. copyExpression below).
template <typename NodeType>
NodeType* deepCopy (const NodeType* subtree)
{
// Clone first, then recover NodeType for the caller via a checked downcast.
return dynamic_cast<NodeType*>( deepCopyNode(subtree) );
}
//! Deep copy an expression
ROSE_DLL_API SgExpression* copyExpression(SgExpression* e);
//!Deep copy a statement
ROSE_DLL_API SgStatement* copyStatement(SgStatement* s);
// from VarSym.cc in src/midend/astOutlining/src/ASTtools
//! Get the variable symbol for the first initialized name of a declaration stmt.
ROSE_DLL_API SgVariableSymbol* getFirstVarSym (SgVariableDeclaration* decl);
//! Get the first initialized name of a declaration statement
ROSE_DLL_API SgInitializedName* getFirstInitializedName (SgVariableDeclaration* decl);
//! A special purpose statement removal function, originally from inlinerSupport.h, Need Jeremiah's attention to refine it. Please don't use it for now.
ROSE_DLL_API void myRemoveStatement(SgStatement* stmt);
ROSE_DLL_API bool isConstantTrue(SgExpression* e);
ROSE_DLL_API bool isConstantFalse(SgExpression* e);
ROSE_DLL_API bool isCallToParticularFunction(SgFunctionDeclaration* decl, SgExpression* e);
ROSE_DLL_API bool isCallToParticularFunction(const std::string& qualifiedName, size_t arity, SgExpression* e);
//! Check if a declaration has a "static' modifier
bool ROSE_DLL_API isStatic(SgDeclarationStatement* stmt);
//! Set a declaration as static
ROSE_DLL_API void setStatic(SgDeclarationStatement* stmt);
//! Check if a declaration has an "extern" modifier
ROSE_DLL_API bool isExtern(SgDeclarationStatement* stmt);
//! Set a declaration as extern
ROSE_DLL_API void setExtern(SgDeclarationStatement* stmt);
//! Interface for creating a statement whose computation writes its answer into
//! a given variable.
class StatementGenerator {
public:
// Virtual destructor so concrete generators can be destroyed through a base pointer.
virtual ~StatementGenerator() {};
//! Build and return a statement that computes the desired value and stores it
//! into the location denoted by where_to_write_answer. Pure virtual:
//! subclasses supply the actual statement construction.
virtual SgStatement* generate(SgExpression* where_to_write_answer) = 0;
};
//! Check if a SgNode _s is an assignment statement (any of =,+=,-=,&=,/=, ^=, etc)
//!
//! Return the left hand, right hand expressions and if the left hand variable is also being read
bool isAssignmentStatement(SgNode* _s, SgExpression** lhs=NULL, SgExpression** rhs=NULL, bool* readlhs=NULL);
//! Variable references can be introduced by SgVarRef, SgPntrArrRefExp, SgInitializedName, SgMemberFunctionRef etc. For Dot and Arrow Expressions, their lhs is used to obtain SgInitializedName (coarse grain) by default. Otherwise, fine-grain rhs is used.
ROSE_DLL_API SgInitializedName* convertRefToInitializedName(SgNode* current, bool coarseGrain=true);
//! Build an abstract handle from an AST node, reuse previously built handle when possible
ROSE_DLL_API AbstractHandle::abstract_handle* buildAbstractHandle(SgNode*);
//! Obtain a matching SgNode from an abstract handle string
ROSE_DLL_API SgNode* getSgNodeFromAbstractHandleString(const std::string& input_string);
//! Dump information about a SgNode for debugging
ROSE_DLL_API void dumpInfo(SgNode* node, std::string desc="");
//! Reorder a list of declaration statements based on their appearance order in source files
ROSE_DLL_API std::vector<SgDeclarationStatement*>
sortSgNodeListBasedOnAppearanceOrderInSource(const std::vector<SgDeclarationStatement*>& nodevec);
// DQ (4/13/2013): We need these to support the unparing of operators defined by operator syntax or member function names.
//! Is an overloaded operator a prefix operator (e.g. address operator X * operator&(), dereference operator X & operator*(), unary plus operator X & operator+(), etc.
// bool isPrefixOperator( const SgMemberFunctionRefExp* memberFunctionRefExp );
bool isPrefixOperator( SgExpression* exp );
//! Check for proper names of possible prefix operators (used in isPrefixOperator()).
bool isPrefixOperatorName( const SgName & functionName );
//! Is an overloaded operator a postfix operator. (e.g. ).
bool isPostfixOperator( SgExpression* exp );
//! Is an overloaded operator an index operator (also referred to as call or subscript operators). (e.g. X & operator()() or X & operator[]()).
bool isIndexOperator( SgExpression* exp );
// DQ (1/10/2014): Adding more general support for token based unparsing.
//! Used to support token unparsing (when the output the trailing token sequence).
SgStatement* lastStatementOfScopeWithTokenInfo (SgScopeStatement* scope, std::map<SgNode*,TokenStreamSequenceToNodeMapping*> & tokenStreamSequenceMap);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST properties
\brief version, language properties of current AST.
*/
// std::string version(); // utility_functions.h, version number
/*! Brief These traverse the memory pool of SgFile IR nodes and determine what languages are in use!
*/
ROSE_DLL_API bool is_Ada_language ();
ROSE_DLL_API bool is_C_language ();
ROSE_DLL_API bool is_Cobol_language ();
ROSE_DLL_API bool is_OpenMP_language ();
ROSE_DLL_API bool is_UPC_language ();
//! Check if dynamic threads compilation is used for UPC programs
ROSE_DLL_API bool is_UPC_dynamic_threads();
ROSE_DLL_API bool is_C99_language ();
ROSE_DLL_API bool is_Cxx_language ();
ROSE_DLL_API bool is_Java_language ();
ROSE_DLL_API bool is_Jovial_language ();
ROSE_DLL_API bool is_Fortran_language ();
ROSE_DLL_API bool is_CAF_language ();
ROSE_DLL_API bool is_PHP_language();
ROSE_DLL_API bool is_Python_language();
ROSE_DLL_API bool is_Cuda_language();
ROSE_DLL_API bool is_OpenCL_language();
ROSE_DLL_API bool is_X10_language();
ROSE_DLL_API bool is_binary_executable();
ROSE_DLL_API bool is_mixed_C_and_Cxx_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_C_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_Cxx_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_C_and_Cxx_language ();
//@}
//------------------------------------------------------------------------
//@{
/*! @name Scope
\brief
*/
// DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique
// labels for scopes in a function (as required for name mangling).
/*! \brief Assigns unique numbers to each SgScopeStatement of a function.
This is used to provide unique names for variables and types defined in
different nested scopes of a function (used in mangled name generation).
*/
void resetScopeNumbers (SgFunctionDefinition * functionDeclaration);
// DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique
// labels for scopes in a function (as required for name mangling).
/*! \brief Clears the cache of scope,integer pairs for the input function.
This is used to clear the cache of computed unique labels for scopes in a function.
This function should be called after any transformation on a function that might affect
the allocation of scopes and cause the existing unique numbers to be incorrect.
This is part of support to provide unique names for variables and types defined in
different nested scopes of a function (used in mangled name generation).
*/
void clearScopeNumbers (SgFunctionDefinition * functionDefinition);
//!Find the enclosing namespace of a declaration
SgNamespaceDefinitionStatement * enclosingNamespaceScope (SgDeclarationStatement * declaration);
// SgNamespaceDefinitionStatement * getEnclosingNamespaceScope (SgNode * node);
bool isPrototypeInScope (SgScopeStatement * scope,
SgFunctionDeclaration * functionDeclaration,
SgDeclarationStatement * startingAtDeclaration);
//!check if node1 is a strict ancestor of node 2. (a node is not considered its own ancestor)
bool ROSE_DLL_API isAncestor(SgNode* node1, SgNode* node2);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Preprocessing Information
\brief #if-#else-#end, comments, #include, etc
*/
//! Dumps a located node's preprocessing information.
void dumpPreprocInfo (SgLocatedNode* locatedNode);
//! Insert #include "filename" or #include <filename> (system header) onto the global scope of a source file, add to be the last #include .. by default among existing headers, Or as the first header. Recommended for use.
PreprocessingInfo * insertHeader(SgSourceFile * source_file, const std::string & header_file_name, bool isSystemHeader, bool asLastHeader);
//! Insert a new header right before stmt, if there are existing headers attached to stmt, insert it as the last or first header as specified by asLastHeader
void insertHeader (SgStatement* stmt, PreprocessingInfo* newheader, bool asLastHeader);
//! Insert #include "filename" or #include <filename> (system header) onto the global scope of a source file
PreprocessingInfo * insertHeader(SgSourceFile * source_file, const std::string & header_file_name, bool isSystemHeader = false, PreprocessingInfo::RelativePositionType position = PreprocessingInfo::before);
//! Insert #include "filename" or #include <filename> (system header) into the global scope containing the current scope, right after other #include XXX.
ROSE_DLL_API PreprocessingInfo* insertHeader(const std::string& filename, PreprocessingInfo::RelativePositionType position=PreprocessingInfo::after, bool isSystemHeader=false, SgScopeStatement* scope=NULL);
//! Identical to movePreprocessingInfo(), except for the stale name and confusing order of parameters. It will be deprecated soon.
ROSE_DLL_API void moveUpPreprocessingInfo (SgStatement* stmt_dst, SgStatement* stmt_src, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef, PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false);
//! Move preprocessing information of stmt_src to stmt_dst. Only move preprocessing information from the specified source-relative position to a specified target position, otherwise move all preprocessing information with position information intact. The preprocessing information is appended to the existing preprocessing information list of the target node by default. Prepending is used if usePrepend is set to true. Optionally, the relative position can be adjusted after the moving using dst_position.
ROSE_DLL_API void movePreprocessingInfo (SgStatement* stmt_src, SgStatement* stmt_dst, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef,
PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false);
//!Cut preprocessing information from a source node and save it into a buffer. Used in combination of pastePreprocessingInfo(). The cut-paste operation is similar to moveUpPreprocessingInfo() but it is more flexible in that the destination node can be unknown during the cut operation.
ROSE_DLL_API void cutPreprocessingInfo (SgLocatedNode* src_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& save_buf);
//!Paste preprocessing information from a buffer to a destination node. Used in combination of cutPreprocessingInfo()
ROSE_DLL_API void pastePreprocessingInfo (SgLocatedNode* dst_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& saved_buf);
//! Attach an arbitrary string to a located node. A workaround to insert irregular statements or vendor-specific attributes.
ROSE_DLL_API PreprocessingInfo* attachArbitraryText(SgLocatedNode* target,
const std::string & text,
PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before);
//!Check if a pragma declaration node has macro calls attached, if yes, replace macro calls within the pragma string with expanded strings. This only works if -rose:wave is turned on.
ROSE_DLL_API void replaceMacroCallsWithExpandedStrings(SgPragmaDeclaration* target);
//@}
//! Build and attach comment onto the global scope of a source file
PreprocessingInfo* attachComment(
SgSourceFile * source_file,
const std::string & content,
PreprocessingInfo::DirectiveType directive_type = PreprocessingInfo::C_StyleComment,
PreprocessingInfo::RelativePositionType position = PreprocessingInfo::before
);
//! Build and attach comment, comment style is inferred from the language type of the target node if not provided
ROSE_DLL_API PreprocessingInfo* attachComment(SgLocatedNode* target, const std::string & content,
PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before,
PreprocessingInfo::DirectiveType dtype= PreprocessingInfo::CpreprocessorUnknownDeclaration);
// DQ (7/20/2008): I am not clear were I should put this function, candidates include: SgLocatedNode or SgInterface
//! Add a string to be unparsed to support code generation for back-end specific tools or compilers.
ROSE_DLL_API void addTextForUnparser ( SgNode* astNode, std::string s, AstUnparseAttribute::RelativePositionType inputlocation );
/**
* Add preproccessor guard around a given node.
* It surrounds the node with "#if guard" and "#endif"
*/
void guardNode(SgLocatedNode * target, std::string guard);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Source File Position
\brief set Sg_File_Info for a SgNode
*/
// ************************************************************************
// Newer versions of now depricated functions
// ************************************************************************
// DQ (5/1/2012): This function queries the SageBuilder::SourcePositionClassification mode (stored in the SageBuilder
// interface) and used the specified mode to initialize the source position data (Sg_File_Info objects). This
// function is the only function that should be called directly (though in a namespace we can't define permissions).
//! Set the source code positon for the current (input) node.
ROSE_DLL_API void setSourcePosition(SgNode* node);
// A better name might be "setSourcePositionForSubTree"
//! Set the source code positon for the subtree (including the root).
ROSE_DLL_API void setSourcePositionAtRootAndAllChildren(SgNode *root);
//! DQ (5/1/2012): New function with improved name.
void setSourcePositionAsTransformation(SgNode *node);
// DQ (5/1/2012): Newly renamed function (previous name preserved for backward compatability).
void setSourcePositionPointersToNull(SgNode *node);
// ************************************************************************
// ************************************************************************
// Older deprecated functions
// ************************************************************************
// Liao, 1/8/2007, set file info. for a whole subtree as transformation generated
//! Set current node's source position as transformation generated
ROSE_DLL_API void setOneSourcePositionForTransformation(SgNode *node);
//! Set current node's source position as NULL
ROSE_DLL_API void setOneSourcePositionNull(SgNode *node);
//! Recursively set source position info(Sg_File_Info) as transformation generated
ROSE_DLL_API void setSourcePositionForTransformation (SgNode * root);
//! Set source position info(Sg_File_Info) as transformation generated for all SgNodes in memory pool
// ROSE_DLL_API void setSourcePositionForTransformation_memoryPool();
//! Check if a node is from a system header file
ROSE_DLL_API bool insideSystemHeader (SgLocatedNode* node);
//! Set the source position of SgLocatedNode to Sg_File_Info::generateDefaultFileInfo(). These nodes WILL be unparsed. Not for transformation usage.
// ROSE_DLL_API void setSourcePosition (SgLocatedNode * locatedNode);
// ************************************************************************
//@}
//------------------------------------------------------------------------
//@{
/*! @name Data types
\brief
*/
// from src/midend/astInlining/typeTraits.h
// src/midend/astUtil/astInterface/AstInterface.h
//! Get the right bool type according to C or C++ language input
SgType* getBoolType(SgNode* n);
//! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long.
////!
////! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool to be treated as integer types
ROSE_DLL_API bool isStrictIntegerType(SgType* t);
//!Get the data type of the first initialized name of a declaration statement
ROSE_DLL_API SgType* getFirstVarType(SgVariableDeclaration* decl);
//! Is a type default constructible? This may not quite work properly.
ROSE_DLL_API bool isDefaultConstructible(SgType* type);
//! Is a type copy constructible? This may not quite work properly.
ROSE_DLL_API bool isCopyConstructible(SgType* type);
//! Is a type assignable? This may not quite work properly.
ROSE_DLL_API bool isAssignable(SgType* type);
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
//! Check if a class type is a pure virtual class. True means that there is at least
//! one pure virtual function that has not been overridden.
//! In the case of an incomplete class type (forward declaration), this function returns false.
ROSE_DLL_API bool isPureVirtualClass(SgType* type, const ClassHierarchyWrapper& classHierarchy);
#endif
//! Does a type have a trivial (built-in) destructor?
ROSE_DLL_API bool hasTrivialDestructor(SgType* t);
//! Is this type a non-constant reference type? (Handles typedefs correctly)
ROSE_DLL_API bool isNonconstReference(SgType* t);
//! Is this type a const or non-const reference type? (Handles typedefs correctly)
ROSE_DLL_API bool isReferenceType(SgType* t);
//! Is this type a pointer type? (Handles typedefs correctly)
ROSE_DLL_API bool isPointerType(SgType* t);
//! Is this a pointer to a non-const type? Note that this function will return true for const pointers pointing to
//! non-const types. For example, (int* const y) points to a modifiable int, so this function returns true. Meanwhile,
//! it returns false for (int const * x) and (int const * const x) because these types point to a const int.
//! Also, only the outer layer of nested pointers is unwrapped. So the function returns true for (const int ** y), but returns
//! false for const (int * const * x)
ROSE_DLL_API bool isPointerToNonConstType(SgType* type);
//! Is this a const type?
/* const char* p = "aa"; is not treated as having a const type. It is a pointer to const char.
* Similarly, neither for const int b[10]; or const int & c =10;
* The standard says, "A compound type is not cv-qualified by the cv-qualifiers (if any) of
the types from which it is compounded. Any cv-qualifiers applied to an array type affect the array element type, not the array type".
*/
ROSE_DLL_API bool isConstType(SgType* t);
//! Remove const (if present) from a type. stripType() cannot do this because it removes all modifiers.
SgType* removeConst(SgType* t);
//! Is this a volatile type?
ROSE_DLL_API bool isVolatileType(SgType* t);
//! Is this a restrict type?
ROSE_DLL_API bool isRestrictType(SgType* t);
//! Is this a scalar type?
/*! We define the following SgType as scalar types: char, short, int, long , void, Wchar, Float, double, long long, string, bool, complex, imaginary
*/
ROSE_DLL_API bool isScalarType(SgType* t);
//! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long.
//!
//! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool.
ROSE_DLL_API bool isStrictIntegerType(SgType* t);
//! Check if a type is a struct type (a special SgClassType in ROSE)
ROSE_DLL_API bool isStructType(SgType* t);
//! Generate a mangled string for a given type based on Itanium C++ ABI
ROSE_DLL_API std::string mangleType(SgType* type);
//! Generate mangled scalar type names according to Itanium C++ ABI, the input type should pass isScalarType() in ROSE
ROSE_DLL_API std::string mangleScalarType(SgType* type);
//! Generated mangled modifier types, include const, volatile,according to Itanium C++ ABI, with extension to handle UPC shared types.
ROSE_DLL_API std::string mangleModifierType(SgModifierType* type);
//! Calculate the number of elements of an array type: dim1* dim2*... , assume element count is 1 for int a[]; Strip off THREADS if it is a UPC array.
ROSE_DLL_API size_t getArrayElementCount(SgArrayType* t);
//! Get the number of dimensions of an array type
ROSE_DLL_API int getDimensionCount(SgType* t);
//! Get the element type of an array. It recursively find the base type for multi-dimension array types
ROSE_DLL_API SgType* getArrayElementType(SgType* t);
//! Get the element type of an array, pointer or string, or NULL if not applicable. This function only check one level base type. No recursion.
ROSE_DLL_API SgType* getElementType(SgType* t);
/// \brief returns the array dimensions in an array as defined for arrtype
/// \param arrtype the type of a C/C++ array
/// \return an array that contains an expression indicating each dimension's size.
/// OWNERSHIP of the expressions is TRANSFERED TO the CALLER (which
/// becomes responsible for freeing the expressions).
/// Note, the first entry of the array is a SgNullExpression, iff the
/// first array dimension was not specified.
/// \code
/// int x[] = { 1, 2, 3 };
/// \endcode
/// note, the expression does not have to be a constant
/// \code
/// int x[i*5];
/// \endcode
/// \post return-value.empty() == false
/// \post return-value[*] != NULL (no nullptr in the returned vector)
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype);
/// \brief returns the array dimensions in an array as defined for arrtype
/// \param arrtype the type of a C/C++ array
/// \param varref a reference to an array variable (the variable of type arrtype)
/// \return an array that contains an expression indicating each dimension's size.
/// OWNERSHIP of the expressions is TRANSFERED TO the CALLER (which
/// becomes responsible for freeing the expressions).
/// If the first array dimension was not specified an expression
/// that indicates that size is generated.
/// \code
/// int x[][3] = { 1, 2, 3, 4, 5, 6 };
/// \endcode
/// the entry for the first dimension will be:
/// \code
/// // 3 ... size of 2nd dimension
/// sizeof(x) / (sizeof(int) * 3)
/// \endcode
/// \pre arrtype is the array-type of varref
/// \post return-value.empty() == false
/// \post return-value[*] != NULL (no nullptr in the returned vector)
/// \post !isSgNullExpression(return-value[*])
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype, const SgVarRefExp& varref);
/// \overload
/// \note see get_C_array_dimensions for SgVarRefExp for details.
/// \todo make initname const
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype, SgInitializedName& initname);
//! Check if an expression is an array access (SgPntrArrRefExp). If so, return its name expression and subscripts if requested. Users can use convertRefToInitializedName() to get the possible name. It does not check if the expression is a top level SgPntrArrRefExp.
ROSE_DLL_API bool isArrayReference(SgExpression* ref, SgExpression** arrayNameExp=NULL, std::vector<SgExpression*>** subscripts=NULL);
//! Collect variable references in array types. The default NodeQuery::querySubTree() will miss variables referenced in array type's index list. e.g. double *buffer = new double[numItems] ;
ROSE_DLL_API int collectVariableReferencesInArrayTypes (SgLocatedNode* root, Rose_STL_Container<SgNode*> & currentVarRefList);
//! Has a UPC shared type of any kinds (shared-to-shared, private-to-shared, shared-to-private, shared scalar/array)? An optional parameter, mod_type_out, stores the first SgModifierType with UPC access information.
/*!
* Note: we classify private-to-shared as 'has shared' type for convenience here. It is indeed a private type in strict sense.
AST graph for some examples:
- shared scalar: SgModifierType -->base type
- shared array: SgArrayType --> SgModiferType --> base type
- shared to shared: SgModifierType --> SgPointerType --> SgModifierType ->SgTypeInt
- shared to private: SgModifierType --> SgPointerType --> base type
- private to shared: SgPointerType --> SgModifierType --> base type
*/
ROSE_DLL_API bool hasUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL );
//! Check if a type is a UPC shared type, including shared array, shared pointers etc. Exclude private pointers to shared types. Optionally return the modifier type with the UPC shared property.
/*!
* ROSE uses SgArrayType of SgModifierType to represent shared arrays, not SgModifierType points to SgArrayType. Also typedef may cause a chain of nodes before reach the actual SgModifierType with UPC shared property.
*/
ROSE_DLL_API bool isUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL);
//! Check if a modifier type is a UPC shared type.
ROSE_DLL_API bool isUpcSharedModifierType (SgModifierType* mod_type);
//! Check if an array type is a UPC shared type. ROSE AST represents a UPC shared array as regular array of elements of UPC shared Modifier Type. Not directly a UPC shared Modifier Type of an array.
ROSE_DLL_API bool isUpcSharedArrayType (SgArrayType* array_type);
//! Check if a shared UPC type is strict memory consistency or not. Return false if it is relaxed. (So isUpcRelaxedSharedModifierType() is not necessary.)
ROSE_DLL_API bool isUpcStrictSharedModifierType(SgModifierType* mode_type);
//! Get the block size of a UPC shared modifier type
ROSE_DLL_API size_t getUpcSharedBlockSize(SgModifierType* mod_type);
//! Get the block size of a UPC shared type, including Modifier types and array of modifier types (shared arrays)
ROSE_DLL_API size_t getUpcSharedBlockSize(SgType* t);
//! Is UPC phase-less shared type? Phase-less means block size of the first SgModifierType with UPC information is 1 or 0/unspecified. Also return false if the type is not a UPC shared type.
ROSE_DLL_API bool isUpcPhaseLessSharedType (SgType* t);
//! Is a UPC private-to-shared pointer? SgPointerType comes first compared to SgModifierType with UPC information. Input type must be any of UPC shared types first.
ROSE_DLL_API bool isUpcPrivateToSharedType(SgType* t);
//! Is a UPC array with dimension of X*THREADS
ROSE_DLL_API bool isUpcArrayWithThreads(SgArrayType* t);
//! Lookup a named type based on its name, bottomup searching from a specified scope. Note name collison might be allowed for c (not C++) between typedef and enum/struct. Only the first matched named type will be returned in this case. typedef is returned as it is, not the base type it actually refers to.
ROSE_DLL_API SgType* lookupNamedTypeInParentScopes(const std::string& type_name, SgScopeStatement* scope=NULL);
// DQ (7/22/2014): Added support for comparing expression types in actual arguments with those expected from the formal function parameter types.
//! Get the type of the associated argument expression from the function type.
ROSE_DLL_API SgType* getAssociatedTypeFromFunctionTypeList(SgExpression* actual_argument_expression);
//! Verify that 2 SgTemplateArgument are equivalent (same type, same expression, or same template declaration)
ROSE_DLL_API bool templateArgumentEquivalence(SgTemplateArgument * arg1, SgTemplateArgument * arg2);
//! Verify that 2 SgTemplateArgumentPtrList are equivalent.
ROSE_DLL_API bool templateArgumentListEquivalence(const SgTemplateArgumentPtrList & list1, const SgTemplateArgumentPtrList & list2);
//! Test for equivalence of types independent of access permissions (private or protected modes for members of classes).
ROSE_DLL_API bool isEquivalentType (const SgType* lhs, const SgType* rhs);
//! Test if two types are equivalent SgFunctionType nodes. This is necessary for template function types
//! They may differ in one SgTemplateType pointer but identical otherwise.
ROSE_DLL_API bool isEquivalentFunctionType (const SgFunctionType* lhs, const SgFunctionType* rhs);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Loop handling
\brief
*/
// by Jeremiah
//! Add a step statement to the end of a loop body
//! Add a new label to the end of the loop, with the step statement after
//! it; then change all continue statements in the old loop body into
//! jumps to the label
//!
//! For example:
//! while (a < 5) {if (a < -3) continue;} (adding "a++" to end) becomes
//! while (a < 5) {if (a < -3) goto label; label: a++;}
ROSE_DLL_API void addStepToLoopBody(SgScopeStatement* loopStmt, SgStatement* step);
ROSE_DLL_API void moveForStatementIncrementIntoBody(SgForStatement* f);
ROSE_DLL_API void convertForToWhile(SgForStatement* f);
ROSE_DLL_API void convertAllForsToWhiles(SgNode* top);
//! Change continue statements in a given block of code to gotos to a label
ROSE_DLL_API void changeContinuesToGotos(SgStatement* stmt, SgLabelStatement* label);
//!Return the loop index variable for a for loop
ROSE_DLL_API SgInitializedName* getLoopIndexVariable(SgNode* loop);
//!Check if a SgInitializedName is used as a loop index within a AST subtree
//! This function will use a bottom-up traverse starting from the subtree_root to find all enclosing loops and check if ivar is used as an index for either of them.
ROSE_DLL_API bool isLoopIndexVariable(SgInitializedName* ivar, SgNode* subtree_root);
//! Check if a for loop uses C99 style initialization statement with multiple expressions like for (int i=0, j=0; ..) or for (i=0,j=0;...)
/*!
for (int i=0, j=0; ..) is stored as two variable declarations under SgForInitStatement's init_stmt member
for (i=0,j=0;...) is stored as a single expression statement, with comma expression (i=0,j=0).
*/
ROSE_DLL_API bool hasMultipleInitStatmentsOrExpressions (SgForStatement* for_loop);
//! Routines to get and set the body of a loop
ROSE_DLL_API SgStatement* getLoopBody(SgScopeStatement* loop);
ROSE_DLL_API void setLoopBody(SgScopeStatement* loop, SgStatement* body);
//! Routines to get the condition of a loop. It recognize While-loop, For-loop, and Do-While-loop
ROSE_DLL_API SgStatement* getLoopCondition(SgScopeStatement* loop);
//! Set the condition statement of a loop, including While-loop, For-loop, and Do-While-loop.
ROSE_DLL_API void setLoopCondition(SgScopeStatement* loop, SgStatement* cond);
//! Check if a for-loop has a canonical form, return loop index, bounds, step, and body if requested
//!
//! A canonical form is defined as : one initialization statement, a test expression, and an increment expression , loop index variable should be of an integer type. IsInclusiveUpperBound is true when <= or >= is used for loop condition
ROSE_DLL_API bool isCanonicalForLoop(SgNode* loop, SgInitializedName** ivar=NULL, SgExpression** lb=NULL, SgExpression** ub=NULL, SgExpression** step=NULL, SgStatement** body=NULL, bool *hasIncrementalIterationSpace = NULL, bool* isInclusiveUpperBound = NULL);
//! Check if a Fortran Do loop has a complete canonical form: Do I=1, 10, 1
ROSE_DLL_API bool isCanonicalDoLoop(SgFortranDo* loop,SgInitializedName** ivar/*=NULL*/, SgExpression** lb/*=NULL*/, SgExpression** ub/*=NULL*/, SgExpression** step/*=NULL*/, SgStatement** body/*=NULL*/, bool *hasIncrementalIterationSpace/*= NULL*/, bool* isInclusiveUpperBound/*=NULL*/);
//! Set the lower bound of a loop header for (i=lb; ...)
ROSE_DLL_API void setLoopLowerBound(SgNode* loop, SgExpression* lb);
//! Set the upper bound of a loop header,regardless the condition expression type. for (i=lb; i op up, ...)
ROSE_DLL_API void setLoopUpperBound(SgNode* loop, SgExpression* ub);
//! Set the stride(step) of a loop 's incremental expression, regardless the expression types (i+=s; i= i+s, etc)
ROSE_DLL_API void setLoopStride(SgNode* loop, SgExpression* stride);
//! Normalize loop init stmt by promoting the single variable declaration statement outside of the for loop header's init statement, e.g. for (int i=0;) becomes int i_x; for (i_x=0;..) and rewrite the loop with the new index variable, if necessary
ROSE_DLL_API bool normalizeForLoopInitDeclaration(SgForStatement* loop);
//! Undo the normalization of for loop's C99 init declaration. Previous record of normalization is used to ease the reverse transformation.
ROSE_DLL_API bool unnormalizeForLoopInitDeclaration(SgForStatement* loop);
//! Normalize a for loop, return true if successful. Generated constants will be fold by default.
//!
//! Translations are :
//! For the init statement: for (int i=0;... ) becomes int i; for (i=0;..)
//! For test expression:
//! i<x is normalized to i<= (x-1) and
//! i>x is normalized to i>= (x+1)
//! For increment expression:
//! i++ is normalized to i+=1 and
//! i-- is normalized to i+=-1
//! i-=s is normalized to i+= -s
ROSE_DLL_API bool forLoopNormalization(SgForStatement* loop, bool foldConstant = true);
//! Normalize a for loop's test expression
//! i<x is normalized to i<= (x-1) and
//! i>x is normalized to i>= (x+1)
ROSE_DLL_API bool normalizeForLoopTest(SgForStatement* loop);
ROSE_DLL_API bool normalizeForLoopIncrement(SgForStatement* loop);
//!Normalize a Fortran Do loop. Make the default increment expression (1) explicit
ROSE_DLL_API bool doLoopNormalization(SgFortranDo* loop);
//! Unroll a target loop with a specified unrolling factor. It handles steps larger than 1 and adds a fringe loop if the iteration count is not evenly divisible by the unrolling factor.
ROSE_DLL_API bool loopUnrolling(SgForStatement* loop, size_t unrolling_factor);
//! Interchange/permutate a n-level perfectly-nested loop rooted at 'loop' using a lexicographical order number within (0,depth!).
ROSE_DLL_API bool loopInterchange(SgForStatement* loop, size_t depth, size_t lexicoOrder);
//! Tile the n-level (starting from 1) loop of a perfectly nested loop nest using tiling size s
ROSE_DLL_API bool loopTiling(SgForStatement* loopNest, size_t targetLevel, size_t tileSize);
//Winnie Loop Collapsing
SgExprListExp * loopCollapsing(SgForStatement* target_loop, size_t collapsing_factor);
bool getForLoopInformations(
SgForStatement * for_loop,
SgVariableSymbol * & iterator,
SgExpression * & lower_bound,
SgExpression * & upper_bound,
SgExpression * & stride
);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Topdown search
\brief Top-down traversal from current node to find a node of a specified type
*/
//! Query a subtree to get all nodes of a given type, with an appropriate downcast.
template <typename NodeType>
std::vector<NodeType*> querySubTree(SgNode* top, VariantT variant = (VariantT)NodeType::static_variant)
{
   // Collect every node of the requested variant under 'top', then downcast
   // each result to the concrete NodeType for the caller's convenience.
   Rose_STL_Container<SgNode*> matches = NodeQuery::querySubTree(top,variant);

   std::vector<NodeType*> typedNodes;
   typedNodes.reserve(matches.size());

   for (Rose_STL_Container<SgNode*>::const_iterator it = matches.begin(); it != matches.end(); ++it)
   {
      // The variant filter guarantees this downcast should succeed; assert to
      // catch any inconsistency between the variant and the template argument.
      NodeType* typedNode = dynamic_cast<NodeType*>(*it);
      ROSE_ASSERT (typedNode);
      typedNodes.push_back(typedNode);
   }

   return typedNodes;
}
/*! \brief Returns STL vector of SgFile IR node pointers.
Demonstrates use of restricted traversal over just SgFile IR nodes.
*/
std::vector < SgFile * >generateFileList ();
/** Get the current SgProject IR Node.
*
* The library should never have more than one project and it asserts such. If no project has been created yet then this
* function returns the null pointer. */
ROSE_DLL_API SgProject * getProject();
//! \return the project associated with a node
SgProject * getProject(const SgNode * node);
//! Query memory pools to grab SgNode of a specified type
template <typename NodeType>
static std::vector<NodeType*> getSgNodeListFromMemoryPool()
{
   // Visitor that records every node of the requested type encountered during
   // a traversal of the NodeType-specific memory pool.
   class CollectingTraversal : public ROSE_VisitTraversal
   {
      public:
         std::vector<NodeType*> collected;
         virtual ~CollectingTraversal() {}
         void visit ( SgNode* node)
         {
            // Every node in this pool should already have the requested type;
            // the assert documents (and enforces) that expectation.
            NodeType* typedNode = dynamic_cast<NodeType*> (node);
            ROSE_ASSERT(typedNode != NULL);
            if (typedNode != NULL)
               collected.push_back(typedNode);
         }
   };

   CollectingTraversal traversal;
   NodeType::traverseMemoryPoolNodes(traversal);
   return traversal.collected;
}
/*! \brief top-down traversal from current node to find the main() function declaration
*/
ROSE_DLL_API SgFunctionDeclaration* findMain(SgNode* currentNode);
//! Find the last declaration statement within a scope (if any). This is often useful to decide where to insert another variable declaration statement. Pragma declarations are not treated as a declaration by default in this context.
SgStatement* findLastDeclarationStatement(SgScopeStatement * scope, bool includePragma = false);
//midend/programTransformation/partialRedundancyElimination/pre.h
//! Find referenced symbols within an expression
std::vector<SgVariableSymbol*> getSymbolsUsedInExpression(SgExpression* expr);
//! Find break statements inside a particular statement, stopping at nested loops or switches
/*! loops or switch statements defines their own contexts for break
statements. The function will stop immediately if run on a loop or switch
statement. If fortranLabel is non-empty, breaks (EXITs) to that label within
nested loops are included in the returned list.
*/
std::vector<SgBreakStmt*> findBreakStmts(SgStatement* code, const std::string& fortranLabel = "");
//! Find all continue statements inside a particular statement, stopping at nested loops
/*! Nested loops define their own contexts for continue statements. The
function will stop immediately if run on a loop
statement. If fortranLabel is non-empty, continues (CYCLEs) to that label
within nested loops are included in the returned list.
*/
std::vector<SgContinueStmt*> findContinueStmts(SgStatement* code, const std::string& fortranLabel = "");
std::vector<SgGotoStatement*> findGotoStmts(SgStatement* scope, SgLabelStatement* l);
std::vector<SgStatement*> getSwitchCases(SgSwitchStatement* sw);
//! Collect all variable references in a subtree
void collectVarRefs(SgLocatedNode* root, std::vector<SgVarRefExp* >& result);
//! Topdown traverse a subtree from root to find the first declaration given its name, scope (optional, can be NULL), and defining or nondefining flag.
template <typename T>
T* findDeclarationStatement(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining)
{
   // A null subtree cannot contain a matching declaration.
   // (Null input is tolerated here so the recursion below stays simple.)
   if (!root) return NULL;

   // First check whether the current node itself is the declaration we want.
   T* candidate = dynamic_cast<T*>(root);
   if (candidate != NULL)
   {
      // When a scope is supplied the declaration must live in exactly that
      // scope in addition to matching the name; a NULL scope means any scope
      // is acceptable (Liao 2/9/2010: allow NULL scope).
      bool matches = false;
      if (scope)
      {
         matches = (candidate->get_scope() == scope) && (candidate->search_for_symbol_from_symbol_table()->get_name() == name);
      }
      else
      {
         matches = (candidate->search_for_symbol_from_symbol_table()->get_name() == name);
      }

      if (matches)
      {
         if (isDefining)
         {
            // The caller asked for the defining declaration; it must exist.
            ROSE_ASSERT (candidate->get_definingDeclaration() != NULL);
            return dynamic_cast<T*> (candidate->get_definingDeclaration());
         }
         return candidate;
      }
   }

   // Not found at this node: recurse over the children in traversal order and
   // return the first match.
   // DQ (4/10/2016): Note that a function member whose defining declaration
   // is defined outside of the class will not be found in this child list.
   std::vector<SgNode*> successors = root->get_traversalSuccessorContainer();
   for (std::vector<SgNode*>::const_iterator child = successors.begin(); child != successors.end(); ++child)
   {
      T* match = findDeclarationStatement<T> (*child,name,scope,isDefining);
      if (match)
      {
         return match;
      }
   }

   return NULL;
}
//! Topdown traverse a subtree from root to find the first function declaration matching the given name, scope (optional, can be NULL), and defining or nondefining flag. This is an instantiation of findDeclarationStatement<T>.
SgFunctionDeclaration* findFunctionDeclaration(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining);
#if 0 //TODO
// 1. preorder traversal from current SgNode till find next SgNode of type V_SgXXX
// until reach the end node
SgNode* getNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);
// 2. return all nodes of type VariantT following the source node
std::vector<SgNode*> getAllNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name Bottom up search
\brief Backwards traverse through the AST to find a node, findEnclosingXXX()
*/
// remember to put const to all arguments.
/** Find a node by type using upward traversal.
*
* Traverse backward through a specified node's ancestors, starting with the node's parent and progressing to more distant
* ancestors, to find the first node matching the specified or derived type. If @p includingSelf is true then the
* starting node, @p astNode, is returned if its type matches, otherwise the search starts at the parent of @p astNode.
*
* For the purposes of this function, the parent (P) of an SgDeclarationStatement node (N) is considered to be the first
* non-defining declaration of N if N has both a defining declaration and a first non-defining declaration and the defining
* declaration is different than the first non-defining declaration.
*
* If no ancestor of the requisite type or subtypes is found then this function returns a null pointer.
*
* If @p astNode is the null pointer, then the return value is a null pointer. That is, if there is no node, then there cannot
* be an enclosing node of the specified type. */
template <typename NodeType>
NodeType* getEnclosingNode(const SgNode* astNode, const bool includingSelf = false)
{
#define DEBUG_GET_ENCLOSING_NODE 0
#if 1
// DQ (12/31/2019): This version does not detect a cycle that Robb's version detects in processing Cxx11_tests/test2016_23.C.
// This will have to be investigated separately from the issue I am working on currently.
// DQ (10/20/2012): This is the older version of this implementation. Until I am sure that
// the newer version (below) is what we want to use I will resolve this conflict by keeping
// the previous version in place.
// No starting node means there cannot be an enclosing node of any type.
if (NULL == astNode)
{
return NULL;
}
// When requested, the starting node itself satisfies the query if it has the right type.
if ( (includingSelf ) && (dynamic_cast<const NodeType*>(astNode)) )
{
return const_cast<NodeType*>(dynamic_cast<const NodeType*> (astNode));
}
// DQ (3/5/2012): Check for reference to self...
ROSE_ASSERT(astNode->get_parent() != astNode);
SgNode* parent = astNode->get_parent();
// DQ (3/5/2012): Check for loops that will cause infinite loops.
SgNode* previouslySeenParent = parent;
bool foundCycle = false;
int counter = 0;
#if DEBUG_GET_ENCLOSING_NODE
printf ("In getEnclosingNode(): previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif
// Phase 1: walk up the raw parent chain, stopping on a type match, a null
// parent, or a detected cycle (parent chain returning to its start).
while ( (foundCycle == false) && (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
{
ROSE_ASSERT(parent->get_parent() != parent);
#if DEBUG_GET_ENCLOSING_NODE
printf (" --- parent = %p = %s \n",parent,parent->class_name().c_str());
printf (" --- --- parent->get_parent() = %p = %s \n",parent->get_parent(),parent->get_parent()->class_name().c_str());
#endif
#if 1
// DQ (1/8/2020): ROSE-82 (on RZ) This limit needs to be larger and increasing it to 500 was enough
// for a specific code with a long chain of if-then-else nesting, So to make this sufficient for more
// general code we have increased the limit to 100,000. Note that 50 was not enough for real code,
// but was enough for our regression tests.
// DQ (12/30/2019): This is added to support detection of infinite loops over parent pointers.
// if (counter >= 500)
if (counter >= 100000)
{
printf ("Exiting: In getEnclosingNode(): loop limit exceeded: counter = %d \n",counter);
ROSE_ASSERT(false);
}
#endif
parent = parent->get_parent();
// DQ (3/5/2012): Check for loops that will cause infinite loops.
// ROSE_ASSERT(parent != previouslySeenParent);
if (parent == previouslySeenParent)
{
foundCycle = true;
}
counter++;
}
#if DEBUG_GET_ENCLOSING_NODE
printf ("previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif
// Restart from the original parent: phase 2 below redoes the walk while
// redirecting declaration statements through their defining declaration.
parent = previouslySeenParent;
SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
if (declarationStatement != NULL)
{
#if 0
printf ("Found a SgDeclarationStatement \n");
#endif
SgDeclarationStatement* definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
printf (" --- declarationStatement = %p \n",declarationStatement);
printf (" --- definingDeclaration = %p \n",definingDeclaration);
if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
{
#if 0
printf ("Found a nondefining declaration so use the non-defining declaration instead \n");
#endif
// DQ (10/19/2012): Use the defining declaration instead.
// parent = firstNondefiningDeclaration;
parent = definingDeclaration;
}
}
#if 0
printf ("reset: previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif
// DQ (10/19/2012): This branch is just to document the cycle that was previously detected, it is for
// debugging only. Thus it only makes sense for it to be executed when "(foundCycle == true)". However,
// this will have to be revisited later since it appears clear that it is a problem for the binary analysis
// work when it is visited for this case. Since the cycle is detected, but there is no assertion on the
// cycle, we don't exit when a cycle is identified (which is the point of the code below).
// Note also that I have fixed the code (above and below) to only chase pointers through defining
// declarations (where they exist), this is important since non-defining declarations can be almost
// anywhere (and thus chasing them can make it appear that there are cycles where there are none
// (I think); test2012_234.C demonstrates an example of this.
// DQ (10/9/2012): Robb has suggested this change to fix the binary analysis work.
// if (foundCycle == true)
if (foundCycle == false)
{
// Phase 2: walk up again, but whenever a declaration statement is reached,
// jump to its defining declaration before taking the next parent step.
while ( (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
{
ROSE_ASSERT(parent->get_parent() != parent);
#if 0
printf ("In getEnclosingNode() (2nd try): parent = %p = %s \n",parent,parent->class_name().c_str());
if (parent->get_file_info() != NULL)
parent->get_file_info()->display("In getEnclosingNode() (2nd try): debug");
#endif
SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
if (declarationStatement != NULL)
{
#if DEBUG_GET_ENCLOSING_NODE
printf ("Found a SgDeclarationStatement \n");
#endif
SgDeclarationStatement* definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
printf (" --- declarationStatement = %p = %s \n",declarationStatement,(declarationStatement != NULL) ? declarationStatement->class_name().c_str() : "null");
printf (" --- definingDeclaration = %p \n",definingDeclaration);
if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
{
#if 0
printf ("Found a nondefining declaration so use the firstNondefining declaration instead \n");
#endif
// DQ (10/19/2012): Use the defining declaration instead.
// parent = firstNondefiningDeclaration;
parent = definingDeclaration;
}
}
parent = parent->get_parent();
#if 1
// DQ (3/5/2012): Check for loops that will cause infinite loops.
ROSE_ASSERT(parent != previouslySeenParent);
#else
printf ("WARNING::WARNING::WARNING commented out assertion for parent != previouslySeenParent \n");
if (parent == previouslySeenParent)
break;
#endif
}
}
// Returns NULL when the walk terminated without a type match.
return const_cast<NodeType*>(dynamic_cast<const NodeType*> (parent));
#else
// DQ (10/20/2012): Using Robb's newer version with my modification to use the definingDeclaration rather than firstNondefiningDeclaration (below).
// Find the parent of specified type, but watch out for cycles in the ancestry (which would cause an infinite loop).
// Cast away const because isSg* functions aren't defined for const node pointers; and our return is not const.
SgNode *node = const_cast<SgNode*>(!astNode || includingSelf ? astNode : astNode->get_parent());
std::set<const SgNode*> seen; // nodes we've seen, in order to detect cycles
while (node) {
if (NodeType *found = dynamic_cast<NodeType*>(node))
return found;
// FIXME: Cycle detection could be moved elsewhere so we don't need to do it on every call. [RPM 2012-10-09]
// DQ (12/30/2019): Provide more detail in error message.
if (seen.insert(node).second == false)
{
printf ("Error: node is already in set and defines a cycle: node = %p = %s \n",node,node->class_name().c_str());
std::set<const SgNode*>::const_iterator i = seen.begin();
while (i != seen.end())
{
const SgNode* element = *i;
printf (" --- seen element: element = %p = %s \n",element,element->class_name().c_str());
i++;
}
printf ("Exiting after error! \n");
ROSE_ASSERT(false);
}
// ROSE_ASSERT(seen.insert(node).second);
// Traverse to parent (declaration statements are a special case)
if (SgDeclarationStatement *declarationStatement = isSgDeclarationStatement(node)) {
SgDeclarationStatement *definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement *firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
if (definingDeclaration && firstNondefiningDeclaration && declarationStatement != firstNondefiningDeclaration) {
// DQ (10/19/2012): Use the defining declaration instead.
// node = firstNondefiningDeclaration;
node = definingDeclaration;
}
} else {
node = node->get_parent();
}
}
return NULL;
#endif
}
//! Find enclosing source file node
ROSE_DLL_API SgSourceFile* getEnclosingSourceFile(SgNode* n, const bool includingSelf=false);
//! Get the closest scope from astNode. Return astNode if it is already a scope.
ROSE_DLL_API SgScopeStatement* getScope(const SgNode* astNode);
//! Get the enclosing scope from a node n
ROSE_DLL_API SgScopeStatement* getEnclosingScope(SgNode* n, const bool includingSelf=false);
//! Traverse back through a node's parents to find the enclosing global scope
ROSE_DLL_API SgGlobal* getGlobalScope( const SgNode* astNode);
//! Find the function definition
ROSE_DLL_API SgFunctionDefinition* getEnclosingProcedure(SgNode* n, const bool includingSelf=false);
ROSE_DLL_API SgFunctionDefinition* getEnclosingFunctionDefinition(SgNode* astNode, const bool includingSelf=false);
//! Find the closest enclosing statement, including the given node
ROSE_DLL_API SgStatement* getEnclosingStatement(SgNode* n);
//! Find the closest switch outside a given statement (normally used for case and default statements)
ROSE_DLL_API SgSwitchStatement* findEnclosingSwitch(SgStatement* s);
//! Find enclosing OpenMP clause body statement from s. If s is already one, return it directly.
ROSE_DLL_API SgOmpClauseBodyStatement* findEnclosingOmpClauseBodyStatement(SgStatement* s);
//! Find the closest loop outside the given statement; if fortranLabel is not empty, the Fortran label of the loop must be equal to it
ROSE_DLL_API SgScopeStatement* findEnclosingLoop(SgStatement* s, const std::string& fortranLabel = "", bool stopOnSwitches = false);
//! Find the enclosing function declaration, including its derived instances like isSgProcedureHeaderStatement, isSgProgramHeaderStatement, and isSgMemberFunctionDeclaration.
ROSE_DLL_API SgFunctionDeclaration * getEnclosingFunctionDeclaration (SgNode * astNode, const bool includingSelf=false);
//roseSupport/utility_functions.h
//! get the SgFile node from current node
ROSE_DLL_API SgFile* getEnclosingFileNode (SgNode* astNode );
//! Get the initializer containing an expression if it is within an initializer.
ROSE_DLL_API SgInitializer* getInitializerOfExpression(SgExpression* n);
//! Get the closest class definition enclosing the specified AST node,
ROSE_DLL_API SgClassDefinition* getEnclosingClassDefinition(SgNode* astnode, const bool includingSelf=false);
//! Get the closest class declaration enclosing the specified AST node,
ROSE_DLL_API SgClassDeclaration* getEnclosingClassDeclaration( SgNode* astNode );
// DQ (2/7/2019): Adding support for name qualification of variable references associated with SgPointerMemberType function parameters.
//! Get the enclosing SgExprListExp (used as part of function argument index evaluation in subexpressions).
ROSE_DLL_API SgExprListExp* getEnclosingExprListExp(SgNode* astNode, const bool includingSelf = false);
// DQ (2/7/2019): Need a function to return when an expression is in an expression subtree.
// This is part of index evaluation for expressions in function argument lists, but likely useful elsewhere as well.
ROSE_DLL_API bool isInSubTree(SgExpression* subtree, SgExpression* exp);
// DQ (2/7/2019): Need a function to return the SgFunctionDeclaration from a SgFunctionCallExp.
ROSE_DLL_API SgFunctionDeclaration* getFunctionDeclaration ( SgFunctionCallExp* functionCallExp );
// DQ (2/17/2019): Generalizing this support for SgVarRefExp and SgMemberFunctionRefExp nodes.
// DQ (2/8/2019): Adding support for detecting when to use added name qualification for pointer-to-member expressions.
ROSE_DLL_API bool isDataMemberReference(SgVarRefExp* varRefExp);
// ROSE_DLL_API bool isAddressTaken(SgVarRefExp* varRefExp);
ROSE_DLL_API bool isAddressTaken(SgExpression* refExp);
// DQ (2/17/2019): Adding support for detecting when to use added name qualification for membr function references.
ROSE_DLL_API bool isMemberFunctionMemberReference(SgMemberFunctionRefExp* memberFunctionRefExp);
// DQ (2/15/2019): Adding support for detecting which class a member reference is being made from.
// ROSE_DLL_API SgClassType* getClassTypeForDataMemberReference(SgVarRefExp* varRefExp);
// ROSE_DLL_API std::list<SgClassType*> getClassTypeChainForDataMemberReference(SgVarRefExp* varRefExp);
ROSE_DLL_API std::list<SgClassType*> getClassTypeChainForMemberReference(SgExpression* refExp);
// DQ (2/17/2019): Display the shared nodes in the AST for debugging.
ROSE_DLL_API void outputSharedNodes( SgNode* node );
// TODO
#if 0
SgNode * getEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
std::vector<SgNode *> getAllEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
SgVariableDeclaration* findVariableDeclaratin( const string& varname)
SgClassDeclaration* getEnclosingClassDeclaration( const SgNode* astNode);
// e.g. for some expression, find its parent statement
SgStatement* getEnclosingStatement(const SgNode* astNode);
SgSwitchStatement* getEnclosingSwitch(SgStatement* s);
SgModuleStatement* getEnclosingModuleStatement( const SgNode* astNode);
// used to build a variable reference for compiler generated code in current scope
SgSymbol * findReachingDefinition (SgScopeStatement* startScope, SgName &name);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST Walk and Traversal
\brief
*/
// Liao, 1/9/2008
/*!
\brief return the first global scope under current project
*/
ROSE_DLL_API SgGlobal * getFirstGlobalScope(SgProject *project);
/*!
\brief get the last statement within a scope, return NULL if it does not exist
*/
ROSE_DLL_API SgStatement* getLastStatement(SgScopeStatement *scope);
//! Get the first statement within a scope, return NULL if it does not exist. Skip compiler-generated statement by default. Count transformation-generated ones, but excluding those which are not to be outputted in unparsers.
ROSE_DLL_API SgStatement* getFirstStatement(SgScopeStatement *scope,bool includingCompilerGenerated=false);
//!Find the first defining function declaration statement in a scope
ROSE_DLL_API SgFunctionDeclaration* findFirstDefiningFunctionDecl(SgScopeStatement* scope);
//! Get next statement within the same scope of current statement
ROSE_DLL_API SgStatement* getNextStatement(SgStatement * currentStmt);
//! Get previous statement of the current statement. It may return a previous statement of a parent scope by default (climbOutScope is true), otherwise only a previous statement of the same scope is returned.
ROSE_DLL_API SgStatement* getPreviousStatement(SgStatement * currentStmt, bool climbOutScope = true);
#if 0 //TODO
// preorder traversal from current SgNode till find next SgNode of type V_SgXXX
SgNode* getNextSgNode( const SgNode* currentNode, VariantT=V_SgNode);
#endif
// DQ (11/15/2018): Adding support for traversals over the include file tree.
//! return path prefix for subtree of include files.
void listHeaderFiles ( SgIncludeFile* includeFile );
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST Comparison
\brief Compare AST nodes, subtree, etc
*/
//! Check if a SgIntVal node has a given value
ROSE_DLL_API bool isEqualToIntConst(SgExpression* e, int value);
//! Check if two function declarations refer to the same one. Two function declarations are the same when they are a) identical, b) same name in C c) same qualified named and mangled name in C++. A nondefining (prototype) declaration and a defining declaration of a same function are treated as the same.
/*!
* There is a similar function bool compareFunctionDeclarations(SgFunctionDeclaration *f1, SgFunctionDeclaration *f2) from Classhierarchy.C
*/
ROSE_DLL_API bool isSameFunction(SgFunctionDeclaration* func1, SgFunctionDeclaration* func2);
//! Check if a statement is the last statement within its closed scope
ROSE_DLL_API bool isLastStatement(SgStatement* stmt);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST insert, removal, and replacement
\brief Add, remove,and replace AST
scope->append_statement(), exprListExp->append_expression() etc. are not enough to handle side effect of parent pointers, symbol tables, preprocessing info, defining/nondefining pointers etc.
*/
// DQ (2/24/2009): Simple function to delete an AST subtree (used in outlining).
//! Function to delete AST subtree's nodes only, users must take care of any dangling pointers, symbols or types that result.
ROSE_DLL_API void deleteAST(SgNode* node);
//! Special purpose function for deleting AST expression tress containing valid original expression trees in constant folded expressions (for internal use only).
ROSE_DLL_API void deleteExpressionTreeWithOriginalExpressionSubtrees(SgNode* root);
// DQ (2/25/2009): Added new function to support outliner.
//! Move statements in first block to the second block (preserves order and rebuilds the symbol table).
ROSE_DLL_API void moveStatementsBetweenBlocks ( SgBasicBlock* sourceBlock, SgBasicBlock* targetBlock );
//! Move a variable declaration to a new scope, handle symbol, special scopes like For loop, etc.
ROSE_DLL_API void moveVariableDeclaration(SgVariableDeclaration* decl, SgScopeStatement* target_scope);
//! Append a statement to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc.
ROSE_DLL_API void appendStatement(SgStatement *stmt, SgScopeStatement* scope=NULL);
//! Append a statement to the end of SgForInitStatement
ROSE_DLL_API void appendStatement(SgStatement *stmt, SgForInitStatement* for_init_stmt);
//! Append a list of statements to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc.
ROSE_DLL_API void appendStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL);
// DQ (2/6/2009): Added function to support outlining into separate file.
//! Append a copy ('decl') of a function ('original_statement') into a 'scope', include any referenced declarations required if the scope is within a compiler generated file. All referenced declarations, including those from headers, are inserted if excludeHeaderFiles is set to true (the new file will not have any headers).
ROSE_DLL_API void appendStatementWithDependentDeclaration( SgDeclarationStatement* decl, SgGlobal* scope, SgStatement* original_statement, bool excludeHeaderFiles );
//! Prepend a statement to the beginning of the current scope, handling side
//! effects as appropriate
ROSE_DLL_API void prependStatement(SgStatement *stmt, SgScopeStatement* scope=NULL);
//! Prepend a statement to the beginning of SgForInitStatement
ROSE_DLL_API void prependStatement(SgStatement *stmt, SgForInitStatement* for_init_stmt);
//! prepend a list of statements to the beginning of the current scope,
//! handling side effects as appropriate
ROSE_DLL_API void prependStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL);
//! Check if a scope statement has a simple child statement list
//! so that inserting additional statements under the scope is straightforward and unambiguous.
//! For example, SgBasicBlock has a simple statement list while IfStmt does not.
ROSE_DLL_API bool hasSimpleChildrenList (SgScopeStatement* scope);
//! Insert a statement before or after the target statement within the target's scope. Move around preprocessing info automatically
ROSE_DLL_API void insertStatement(SgStatement *targetStmt, SgStatement* newStmt, bool insertBefore= true, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements before or after the target statement within the
//target's scope
ROSE_DLL_API void insertStatementList(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts, bool insertBefore= true);
//! Insert a statement before a target statement
ROSE_DLL_API void insertStatementBefore(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements before a target statement
ROSE_DLL_API void insertStatementListBefore(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts);
//! Insert a statement after a target statement, Move around preprocessing info automatically by default
ROSE_DLL_API void insertStatementAfter(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements after a target statement
ROSE_DLL_API void insertStatementListAfter(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmt);
//! Insert a statement after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found
ROSE_DLL_API void insertStatementAfterLastDeclaration(SgStatement* stmt, SgScopeStatement* scope);
//! Insert a list of statements after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found
ROSE_DLL_API void insertStatementAfterLastDeclaration(std::vector<SgStatement*> stmt_list, SgScopeStatement* scope);
//! Insert a statement before the first non-declaration statement in a scope. If the scope has no non-declaration statements
// then the statement is inserted at the end of the scope.
ROSE_DLL_API void insertStatementBeforeFirstNonDeclaration(SgStatement *newStmt, SgScopeStatement *scope,
bool movePreprocessingInfo=true);
//! Insert statements before the first non-declaration statement in a scope. If the scope has no non-declaration statements
//then the new statements are inserted at the end of the scope.
ROSE_DLL_API void insertStatementListBeforeFirstNonDeclaration(const std::vector<SgStatement*> &newStmts, SgScopeStatement *scope);
// DQ (11/21/2018): We need to sometimes insert something after the last statement of the collection from rose_edg_required_macros_and_functions.h.
ROSE_DLL_API SgStatement* lastFrontEndSpecificStatement( SgGlobal* globalScope );
//! Remove a statement from its attach point of the AST. Automatically keep its associated preprocessing information at the original place after the removal. The statement is still in memory and it is up to the users to decide if the removed one will be inserted somewhere else or released from memory (deleteAST()).
ROSE_DLL_API void removeStatement(SgStatement* stmt, bool autoRelocatePreprocessingInfo = true);
//! Deep delete a sub AST tree. It uses postorder traversal to delete each child node. Users must take care of any dangling pointers, symbols or types that result. This is identical to deleteAST()
ROSE_DLL_API void deepDelete(SgNode* root);
//! Replace a statement with another. Move preprocessing information from oldStmt to newStmt if requested.
ROSE_DLL_API void replaceStatement(SgStatement* oldStmt, SgStatement* newStmt, bool movePreprocessinInfo = false);
//! Replace an anchor node with a specified pattern subtree with optional SgVariantExpression. All SgVariantExpression in the pattern will be replaced with copies of the anchor node.
ROSE_DLL_API SgNode* replaceWithPattern (SgNode * anchor, SgNode* new_pattern);
//! Replace all variable references to an old symbol in a scope to being references to a new symbol.
// Essentially replace variable a with b.
ROSE_DLL_API void replaceVariableReferences(SgVariableSymbol* old_sym, SgVariableSymbol* new_sym, SgScopeStatement * scope );
// DQ (11/12/2018): Adding test to avoid issues that we can't test for in the unparsing of header files using the token based unparsing.
//! If header file unparsing and token-based unparsing are used, then some statements in header files
//! used with the same name and different include syntax can't be transformed. This is currently because
//! there is no way to generally test the resulting transformed code generated by ROSE.
ROSE_DLL_API bool statementCanBeTransformed(SgStatement* stmt);
/** Given an expression, generates a temporary variable whose initializer optionally evaluates
* that expression. Then, the var reference expression returned can be used instead of the original
* expression. The temporary variable created can be reassigned to the expression by the returned SgAssignOp;
* this can be used when the expression the variable represents needs to be evaluated. NOTE: This handles
* reference types correctly by using pointer types for the temporary.
* @param expression Expression which will be replaced by a variable
* @param scope scope in which the temporary variable will be generated
* @param reEvaluate an assignment op to reevaluate the expression. Leave NULL if not needed
 * @return declaration of the temporary variable, and a variable reference expression to use instead of
* the original expression. */
std::pair<SgVariableDeclaration*, SgExpression* > createTempVariableForExpression(SgExpression* expression,
SgScopeStatement* scope, bool initializeInDeclaration, SgAssignOp** reEvaluate = NULL);
/* This function creates a temporary variable for a given expression in the given scope
This is different from SageInterface::createTempVariableForExpression in that it does not
   try to be smart to create pointers to reference types and so on. The temp is initialized to the expression.
The caller is responsible for setting the parent of SgVariableDeclaration since buildVariableDeclaration
may not set_parent() when the scope stack is empty. See programTransformation/extractFunctionArgumentsNormalization/ExtractFunctionArguments.C for sample usage.
@param expression Expression which will be replaced by a variable
@param scope scope in which the temporary variable will be generated
*/
std::pair<SgVariableDeclaration*, SgExpression*> createTempVariableAndReferenceForExpression
(SgExpression* expression, SgScopeStatement* scope);
//! Append an argument to SgFunctionParameterList, transparently set parent,scope, and symbols for arguments when possible
/*! We recommend to build SgFunctionParameterList before building a function declaration
However, it is still allowed to append new arguments for existing function declarations.
\todo function type , function symbol also need attention.
*/
ROSE_DLL_API SgVariableSymbol* appendArg(SgFunctionParameterList *, SgInitializedName*);
//!Prepend an argument to SgFunctionParameterList
ROSE_DLL_API SgVariableSymbol* prependArg(SgFunctionParameterList *, SgInitializedName*);
//! Append an expression to a SgExprListExp, set the parent pointer also
ROSE_DLL_API void appendExpression(SgExprListExp *, SgExpression*);
//! Append an expression list to a SgExprListExp, set the parent pointers also
ROSE_DLL_API void appendExpressionList(SgExprListExp *, const std::vector<SgExpression*>&);
//! Set parameter list for a function declaration, considering existing parameter list etc.
template <class actualFunction>
void setParameterList(actualFunction *func,SgFunctionParameterList *paralist) {
   // Attach 'paralist' as the parameter list of 'func': release any previously
   // attached (and different) list, install the new one, set its parent pointer,
   // and point each parameter's SgInitializedName back at 'func' via declptr.
   //
   // TODO consider the difference between C++ and Fortran
   // fixup the scope of arguments,no symbols for nondefining function declaration's arguments

   // DQ (11/25/2011): templated function so that we can handle both
   // SgFunctionDeclaration and SgTemplateFunctionDeclaration (and their associated member
   // function derived classes).

   ROSE_ASSERT(func != NULL);
   ROSE_ASSERT(paralist != NULL);

#if 0
   // At this point we don't have cerr and endl defined, so comment this code out.
   // Warn to users if a paralist is being shared
   if (paralist->get_parent() !=NULL)
     {
       // Fixed typo in the (currently disabled) diagnostic: "Waring" -> "Warning".
       cerr << "Warning! Setting a used SgFunctionParameterList to function: "
            << (func->get_name()).getString()<<endl
            << " Sharing parameter lists can corrupt symbol tables!"<<endl
            << " Please use deepCopy() to get an exclusive parameter list for each function declaration!"<<endl;
       // ROSE_ASSERT(false);
     }
#endif

   // Liao,2/5/2008 constructor of SgFunctionDeclaration will automatically generate
   // SgFunctionParameterList, so be cautious when set new paralist!!
   // Release the auto-generated (or any previously attached) list, but never delete
   // the very list we are about to install.
   if (func->get_parameterList() != NULL && func->get_parameterList() != paralist)
     {
       delete func->get_parameterList();
     }

   func->set_parameterList(paralist);
   paralist->set_parent(func);

   // DQ (5/15/2012): Need to set the declptr in each SgInitializedName IR node.
   // This is needed to support the AST Copy mechanism (at least). The files: test2005_150.C,
   // test2012_81.C and testcode2012_82.C demonstrate this problem.
   SgInitializedNamePtrList & args = paralist->get_args();
   for (SgInitializedNamePtrList::iterator i = args.begin(); i != args.end(); i++)
     {
       (*i)->set_declptr(func);
     }
}
//! Set a pragma of a pragma declaration. handle memory release for preexisting pragma, and set parent pointer.
ROSE_DLL_API void setPragma(SgPragmaDeclaration* decl, SgPragma *pragma);
//! Replace an expression with another, used for variable reference substitution and others. the old expression can be deleted (default case) or kept.
ROSE_DLL_API void replaceExpression(SgExpression* oldExp, SgExpression* newExp, bool keepOldExp=false);
//! Replace a given expression with a list of statements produced by a generator
ROSE_DLL_API void replaceExpressionWithStatement(SgExpression* from,
SageInterface::StatementGenerator* to);
//! Similar to replaceExpressionWithStatement, but with more restrictions.
//! Assumptions: from is not within the test of a loop or ifStmt, not currently traversing from or the statement it is in
ROSE_DLL_API void replaceSubexpressionWithStatement(SgExpression* from,
SageInterface::StatementGenerator* to);
//! Set operands for expressions with single operand, such as unary expressions. handle file info, lvalue, pointer downcasting, parent pointer etc.
ROSE_DLL_API void setOperand(SgExpression* target, SgExpression* operand);
//!set left hand operand for binary expressions, transparently downcasting target expressions when necessary
ROSE_DLL_API void setLhsOperand(SgExpression* target, SgExpression* lhs);
//!set right hand operand for binary expressions
ROSE_DLL_API void setRhsOperand(SgExpression* target, SgExpression* rhs);
//! Set original expression trees to NULL for SgValueExp or SgCastExp expressions, so you can change the value and have it unparsed correctly.
ROSE_DLL_API void removeAllOriginalExpressionTrees(SgNode* top);
// DQ (1/25/2010): Added support for directories
//! Move file to be generated in a subdirectory (will be generated by the unparser).
ROSE_DLL_API void moveToSubdirectory ( std::string directoryName, SgFile* file );
//! Supporting function to comment relocation in insertStatement() and removeStatement().
ROSE_DLL_API SgStatement* findSurroundingStatementFromSameFile(SgStatement* targetStmt, bool & surroundingStatementPreceedsTargetStatement);
//! Relocate comments and CPP directives from one statement to another.
ROSE_DLL_API void moveCommentsToNewStatement(SgStatement* sourceStatement, const std::vector<int> & indexList, SgStatement* targetStatement, bool surroundingStatementPreceedsTargetStatement);
// DQ (7/19/2015): This is required to support general unparsing of template instantations for the GNU g++
// compiler which does not permit name qualification to be used to support the expression of the namespace
// where a template instantiation would be placed. Such name qualification would also sometimes require
// global qualification which is also not allowed by the GNU g++ compiler. These issues appear to be
// specific to the GNU compiler versions, at least versions 4.4 through 4.8.
//! Relocate the declaration to be explicitly represented in its associated namespace (required for some backend compilers to process template instantiations).
ROSE_DLL_API void moveDeclarationToAssociatedNamespace ( SgDeclarationStatement* declarationStatement );
ROSE_DLL_API bool isTemplateInstantiationNode(SgNode* node);
ROSE_DLL_API void wrapAllTemplateInstantiationsInAssociatedNamespaces(SgProject* root);
// DQ (12/1/2015): Adding support for fixup internal data structures that have references to statements (e.g. macro expansions).
ROSE_DLL_API void resetInternalMapsForTargetStatement(SgStatement* sourceStatement);
// DQ (6/7/2019): Add support for transforming function definitions to function prototypes in a subtree.
// We might have to make this specific to a file (only traversing the functions in that file).
/*!\brief Convert function definitions in a subtree to function prototypes.
* This function operates on the new file used to support outlined function definitions.
* We use a copy of the file where the code will be outlined FROM, so that if there are references to
// declarations in the outlined code we can support the outlined code with those references. This
* approach has the added advantage of also supporting the same include file tree as the original
* file where the outlined code is being taken from.
*/
ROSE_DLL_API void convertFunctionDefinitionsToFunctionPrototypes(SgNode* node);
// DQ (11/10/2019): Lower level support for convertFunctionDefinitionsToFunctionPrototypes().
ROSE_DLL_API void replaceDefiningFunctionDeclarationWithFunctionPrototype ( SgFunctionDeclaration* functionDeclaration );
ROSE_DLL_API std::vector<SgFunctionDeclaration*> generateFunctionDefinitionsList(SgNode* node);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST repair, fix, and postprocessing.
\brief Mostly used internally when some AST pieces are built without knowing their target
scope/parent, especially during bottom-up construction of AST. The associated symbols,
parent and scope pointers cannot be set on construction then.
A set of utility functions are provided to
    patch up scope, parent, and symbol information for them when the target scope/parent becomes known.
*/
//! Connect variable reference to the right variable symbols when feasible, return the number of references being fixed.
/*! In AST translation, it is possible to build a variable reference before the variable
is being declared. buildVarRefExp() will use fake initialized name and symbol as placeholders
to get the work done. Users should call fixVariableReference() when AST is complete and all
variable declarations are in place.
*/
ROSE_DLL_API int fixVariableReferences(SgNode* root, bool cleanUnusedSymbol=true);
//!Patch up symbol, scope, and parent information when a SgVariableDeclaration's scope is known.
/*!
It is possible to build a variable declaration without knowing its scope information during bottom-up construction of AST, though top-down construction is recommended in general.
In this case, we have to patch up symbol table, scope and parent information when the scope is known. This function is usually used internally within appendStatement(), insertStatement().
*/
ROSE_DLL_API void fixVariableDeclaration(SgVariableDeclaration* varDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement() etc when a struct declaration was built without knowing its target scope.
ROSE_DLL_API void fixStructDeclaration(SgClassDeclaration* structDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement() etc when a class declaration was built without knowing its target scope.
ROSE_DLL_API void fixClassDeclaration(SgClassDeclaration* classDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement() etc when a namespace declaration was built without knowing its target scope.
ROSE_DLL_API void fixNamespaceDeclaration(SgNamespaceDeclarationStatement* structDecl, SgScopeStatement* scope);
//! Fix symbol table for SgLabelStatement. Used Internally when the label is built without knowing its target scope. Both parameters cannot be NULL.
ROSE_DLL_API void fixLabelStatement(SgLabelStatement* label_stmt, SgScopeStatement* scope);
//! Set a numerical label for a Fortran statement. The statement should have a enclosing function definition already. SgLabelSymbol and SgLabelRefExp are created transparently as needed.
ROSE_DLL_API void setFortranNumericLabel(SgStatement* stmt, int label_value,
SgLabelSymbol::label_type_enum label_type=SgLabelSymbol::e_start_label_type,
SgScopeStatement* label_scope=NULL);
//! Suggest next usable (non-conflicting) numeric label value for a Fortran function definition scope
ROSE_DLL_API int suggestNextNumericLabel(SgFunctionDefinition* func_def);
//! Fix the symbol table and set scope (only if scope in declaration is not already set).
ROSE_DLL_API void fixFunctionDeclaration(SgFunctionDeclaration* stmt, SgScopeStatement* scope);
//! Fix the symbol table and set scope (only if scope in declaration is not already set).
ROSE_DLL_API void fixTemplateDeclaration(SgTemplateDeclaration* stmt, SgScopeStatement* scope);
//! A wrapper containing fixes (fixVariableDeclaration(),fixStructDeclaration(), fixLabelStatement(), etc) for all kinds statements. Should be used before attaching the statement into AST.
ROSE_DLL_API void fixStatement(SgStatement* stmt, SgScopeStatement* scope);
// DQ (6/11/2015): This reports the statements that are marked as transformed (used to debug the token-based unparsing).
//! This collects the statements that are marked as transformed (useful in debugging).
ROSE_DLL_API std::set<SgStatement*> collectTransformedStatements( SgNode* node );
//! This collects the statements that are marked as modified (a flag automatically set by all set_* generated functions) (useful in debugging).
ROSE_DLL_API std::set<SgStatement*> collectModifiedStatements( SgNode* node );
//! This collects the SgLocatedNodes that are marked as modified (a flag automatically set by all set_* generated functions) (useful in debugging).
ROSE_DLL_API std::set<SgLocatedNode*> collectModifiedLocatedNodes( SgNode* node );
// DQ (6/5/2019): Use the previously constructed set (above) to reset the IR nodes to be marked as isModified.
//! Use the set of IR nodes and set the isModified flag in each IR node to true.
ROSE_DLL_API void resetModifiedLocatedNodes(const std::set<SgLocatedNode*> & modifiedNodeSet);
// DQ (10/23/2018): Report nodes that are marked as modified.
ROSE_DLL_API void reportModifiedStatements(const std::string & label, SgNode* node);
// DQ (3/22/2019): Translate CPP directives from attached preprocessor information to CPP Directive Declaration IR nodes.
ROSE_DLL_API void translateToUseCppDeclarations( SgNode* n );
ROSE_DLL_API void translateScopeToUseCppDeclarations( SgScopeStatement* scope );
ROSE_DLL_API std::vector<SgC_PreprocessorDirectiveStatement*> translateStatementToUseCppDeclarations( SgStatement* statement, SgScopeStatement* scope);
ROSE_DLL_API void printOutComments ( SgLocatedNode* locatedNode );
ROSE_DLL_API bool skipTranslateToUseCppDeclaration( PreprocessingInfo* currentPreprocessingInfo );
// DQ (12/2/2019): Debugging support.
ROSE_DLL_API void outputFileIds( SgNode* node );
//@}
//! Update defining and nondefining links due to a newly introduced function declaration. Should be used after inserting the function into a scope.
/*! This function not only set the defining and nondefining links of the newly introduced
* function declaration inside a scope, but also update other same function declarations' links
* accordingly if there are any.
* Assumption: The function has already inserted/appended/prepended into the scope before calling this function.
*/
ROSE_DLL_API void updateDefiningNondefiningLinks(SgFunctionDeclaration* func, SgScopeStatement* scope);
//------------------------------------------------------------------------
//@{
/*! @name Advanced AST transformations, analyses, and optimizations
\brief Some complex but commonly used AST transformations.
*/
//! Collect all read and write references within stmt, which can be a function, a scope statement, or a single statement. Note that a reference can be both read and written, like i++
ROSE_DLL_API bool
collectReadWriteRefs(SgStatement* stmt, std::vector<SgNode*>& readRefs, std::vector<SgNode*>& writeRefs, bool useCachedDefUse=false);
//!Collect unique variables which are read or written within a statement. Note that a variable can be both read and written. The statement can be either of a function, a scope, or a single line statement. For accesses to members of aggregate data, we return the coarse grain aggregate mem obj by default.
ROSE_DLL_API bool collectReadWriteVariables(SgStatement* stmt, std::set<SgInitializedName*>& readVars, std::set<SgInitializedName*>& writeVars, bool coarseGrain=true);
//!Collect read only variables within a statement. The statement can be either of a function, a scope, or a single line statement. For accesses to members of aggregate data, we return the coarse grain aggregate mem obj by default.
ROSE_DLL_API void collectReadOnlyVariables(SgStatement* stmt, std::set<SgInitializedName*>& readOnlyVars, bool coarseGrain=true);
//!Collect read only variable symbols within a statement. The statement can be either of a function, a scope, or a single line statement. For accesses to members of aggregate data, we return the coarse grain aggregate mem obj by default.
ROSE_DLL_API void collectReadOnlySymbols(SgStatement* stmt, std::set<SgVariableSymbol*>& readOnlySymbols, bool coarseGrain=true);
//! Check if a variable reference is used by its address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++
ROSE_DLL_API bool isUseByAddressVariableRef(SgVarRefExp* ref);
//! Collect variable references involving use by address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++
ROSE_DLL_API void collectUseByAddressVariableRefs (const SgStatement* s, std::set<SgVarRefExp* >& varSetB);
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
//!Call liveness analysis on an entire project
ROSE_DLL_API LivenessAnalysis * call_liveness_analysis(SgProject* project, bool debug=false);
//!get liveIn and liveOut variables for a for loop from liveness analysis result liv.
ROSE_DLL_API void getLiveVariables(LivenessAnalysis * liv, SgForStatement* loop, std::set<SgInitializedName*>& liveIns, std::set<SgInitializedName*> & liveOuts);
#endif
//!Recognize and collect reduction variables and operations within a C/C++ loop, following OpenMP 3.0 specification for allowed reduction variable types and operation types.
ROSE_DLL_API void ReductionRecognition(SgForStatement* loop, std::set< std::pair <SgInitializedName*, OmpSupport::omp_construct_enum> > & results);
//! Constant folding an AST subtree rooted at 'r' (replacing its children with their constant values, if applicable). Please be advised that constant folding on floating point computation may decrease the accuracy of floating point computations!
/*! It is a wrapper function for ConstantFolding::constantFoldingOptimization(). Note that only r's children are replaced with their corresponding constant values, not the input SgNode r itself. You have to call this upon an expression's parent node if you want to fold the expression. */
ROSE_DLL_API void constantFolding(SgNode* r);
//!Instrument(Add a statement, often a function call) into a function right before the return points, handle multiple return statements (with duplicated statement s) and return expressions with side effects. Return the number of statements inserted.
/*! Useful when adding a runtime library call to terminate the runtime system right before the end of a program, especially for OpenMP and UPC runtime systems. Return with complex expressions with side effects are rewritten using an additional assignment statement.
*/
ROSE_DLL_API int instrumentEndOfFunction(SgFunctionDeclaration * func, SgStatement* s);
//! Remove jumps whose label is immediately after the jump. Used to clean up inlined code fragments.
ROSE_DLL_API void removeJumpsToNextStatement(SgNode*);
//! Remove labels which are not targets of any goto statements
ROSE_DLL_API void removeUnusedLabels(SgNode* top);
//! Remove consecutive labels
ROSE_DLL_API void removeConsecutiveLabels(SgNode* top);
//! Merge a variable assignment statement into a matching variable declaration statement. Callers should make sure the merge is semantically correct (by not introducing compilation errors). This function simply does the merge transformation, without eligibility check.
/*!
* e.g. int i; i=10; becomes int i=10; the original i=10 will be deleted after the merge
* if success, return true, otherwise return false (e.g. variable declaration does not match or already has an initializer)
* The original assignment stmt will be removed by default
* This function is a bit ambiguous about the merge direction, to be phased out.
*/
ROSE_DLL_API bool mergeDeclarationAndAssignment (SgVariableDeclaration* decl, SgExprStatement* assign_stmt, bool removeAssignStmt = true);
//! Merge an assignment into its upstream declaration statement. Callers should make sure the merge is semantically correct.
ROSE_DLL_API bool mergeAssignmentWithDeclaration (SgExprStatement* assign_stmt, SgVariableDeclaration* decl, bool removeAssignStmt = true);
//! Merge a declaration statement into a matching followed variable assignment. Callers should make sure the merge is semantically correct (by not introducing compilation errors). This function simply does the merge transformation, without eligibility check.
/*!
* e.g. int i; i=10; becomes int i=10; the original int i; will be deleted after the merge
*/
ROSE_DLL_API bool mergeDeclarationWithAssignment (SgVariableDeclaration* decl, SgExprStatement* assign_stmt);
//! Split a variable declaration with an rhs assignment into two statements: a declaration and an assignment.
/*! Return the generated assignment statement, if any
* e.g. int i =10; becomes int i; i=10;
* This can be seen as a normalization of declarations
*/
ROSE_DLL_API SgExprStatement* splitVariableDeclaration (SgVariableDeclaration* decl);
//! Split declarations within a scope into declarations and assignment statements, by default only top level declarations are considered. Return the number of declarations split.
ROSE_DLL_API int splitVariableDeclaration (SgScopeStatement* scope, bool topLevelOnly = true);
//! Replace an expression with a temporary variable and an assignment statement
/*!
Add a new temporary variable to contain the value of 'from'
Change reference to 'from' to use this new variable
Assumptions: 'from' is not within the test of a loop or 'if'
not currently traversing 'from' or the statement it is in
*/
ROSE_DLL_API SgAssignInitializer* splitExpression(SgExpression* from, std::string newName = "");
//! Split long expressions into blocks of statements
ROSE_DLL_API void splitExpressionIntoBasicBlock(SgExpression* expr);
//! Remove labeled goto statements
ROSE_DLL_API void removeLabeledGotos(SgNode* top);
//! If the given statement contains any break statements in its body, add a new label below the statement and change the breaks into gotos to that new label.
ROSE_DLL_API void changeBreakStatementsToGotos(SgStatement* loopOrSwitch);
//! Check if the body of a 'for' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfFor(SgForStatement* fs);
//! Check if the body of a 'upc_forall' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfUpcForAll(SgUpcForAllStatement* fs);
//! Check if the body of a 'while' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfWhile(SgWhileStmt* ws);
//! Check if the body of a 'do .. while' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfDoWhile(SgDoWhileStmt* ws);
//! Check if the body of a 'switch' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfSwitch(SgSwitchStatement* ws);
//! Check if the body of a 'case option' statement is a SgBasicBlock, create one if not.
SgBasicBlock* ensureBasicBlockAsBodyOfCaseOption(SgCaseOptionStmt* cs);
//! Check if the body of a 'default option' statement is a SgBasicBlock, create one if not.
SgBasicBlock* ensureBasicBlockAsBodyOfDefaultOption(SgDefaultOptionStmt * cs);
//! Check if the true body of a 'if' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsTrueBodyOfIf(SgIfStmt* ifs);
//! Check if the false body of a 'if' statement is a SgBasicBlock, create one if not when the flag is true.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsFalseBodyOfIf(SgIfStmt* ifs, bool createEmptyBody = true);
//! Check if the body of a 'catch' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfCatch(SgCatchOptionStmt* cos);
//! Check if the body of a SgOmpBodyStatement is a SgBasicBlock, create one if not
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfOmpBodyStmt(SgOmpBodyStatement* ompbodyStmt);
// DQ (1/18/2015): This is added to support better quality token-based unparsing.
//! Remove unused basic block IR nodes added as part of normalization.
ROSE_DLL_API void cleanupNontransformedBasicBlockNode();
// DQ (1/18/2015): This is added to support better quality token-based unparsing.
//! Record where normalizations have been done so that we can perform denormalizations as required for the token-based unparsing to generate minimal diffs.
ROSE_DLL_API void recordNormalizations(SgStatement* s);
//! Check if a statement is a (true or false) body of a container-like parent, such as For, Upc_forall, Do-while,
//! switch, If, Catch, OmpBodyStmt, etc
bool isBodyStatement (SgStatement* s);
//! Fix up ifs, loops, while, switch, Catch, OmpBodyStatement, etc. to have blocks as body components. It also adds an empty else body to if statements that don't have them.
void changeAllBodiesToBlocks(SgNode* top, bool createEmptyBody = true);
// The same as changeAllBodiesToBlocks(SgNode* top). Phased out.
//void changeAllLoopBodiesToBlocks(SgNode* top);
//! Make a single statement body to be a basic block. Its parent is if, while, catch, or upc_forall etc.
SgBasicBlock * makeSingleStatementBodyToBlock(SgStatement* singleStmt);
#if 0
/** If s is the body of a loop, catch, or if statement and is already a basic block,
* s is returned unmodified. Otherwise generate a SgBasicBlock between s and its parent
* (a loop, catch, or if statement, etc). */
SgLocatedNode* ensureBasicBlockAsParent(SgStatement* s);
#endif
//! Get the constant value from a constant integer expression; abort on
//! everything else. Note that signed long longs are converted to unsigned.
unsigned long long getIntegerConstantValue(SgValueExp* expr);
//! Get a statement's dependent declarations which declares the types used in the statement. The returned vector of declaration statements are sorted according to their appearance order in the original AST. Any reference to a class or template class from a namespace will treated as a reference to the enclosing namespace.
std::vector<SgDeclarationStatement*> getDependentDeclarations (SgStatement* stmt );
//! Insert an expression (new_exp )before another expression (anchor_exp) has possible side effects, without changing the original semantics. This is achieved by using a comma operator: (new_exp, anchor_exp). The comma operator is returned.
SgCommaOpExp *insertBeforeUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp);
//! Insert an expression (new_exp ) after another expression (anchor_exp) has possible side effects, without changing the original semantics. This is done by using two comma operators: type T1; ... ((T1 = anchor_exp, new_exp),T1) )... , where T1 is a temp variable saving the possible side effect of anchor_exp. The top level comma op exp is returned. The reference to T1 in T1 = anchor_exp is saved in temp_ref.
SgCommaOpExp *insertAfterUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp, SgStatement** temp_decl = NULL, SgVarRefExp** temp_ref = NULL);
/// \brief moves the body of a function f to a new function f`;
/// f's body is replaced with code that forwards the call to f`.
/// \return a pair indicating the statement containing the call of f`
/// and an initialized name refering to the temporary variable
/// holding the result of f`. In case f returns void
/// the initialized name is NULL.
/// \param definingDeclaration the defining function declaration of f
/// \param newName the name of function f`
/// \details f's new body becomes { f`(...); } and { int res = f`(...); return res; }
/// for functions returning void and a value, respectively.
/// two function declarations are inserted in f's enclosing scope
/// \code
/// result_type f`(...); <--- (1)
/// result_type f (...) { forward call to f` }
/// result_type f`(...) { original code } <--- (2)
/// \endcode
/// Calls to f are not updated, thus in the transformed code all
/// calls will continue calling f (this is also true for
/// recursive function calls from within the body of f`).
/// After the function has created the wrapper,
/// definingDeclaration becomes the wrapper function
/// The definition of f` is the next entry in the
/// statement list; the forward declaration of f` is the previous
/// entry in the statement list.
/// \pre definingDeclaration must be a defining declaration of a
/// free standing function.
/// typeid(SgFunctionDeclaration) == typeid(definingDeclaration)
/// i.e., this function is NOT implemented for class member functions,
/// template functions, procedures, etc.
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, SgName newName);
/// \overload
/// \tparam NameGen functor that generates a new name based on the old name.
/// interface: SgName nameGen(const SgName&)
/// \param nameGen name generator
/// \brief see wrapFunction for details
template <class NameGen>
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, NameGen nameGen)
{
  // Derive the wrapper function's name from the original function's name and
  // delegate all of the actual work to the SgName-based overload above.
  const SgName wrapperName = nameGen(definingDeclaration.get_name());
  return wrapFunction(definingDeclaration, wrapperName);
}
/// \brief convenience function that returns the first initialized name in a
/// list of variable declarations.
SgInitializedName& getFirstVariable(SgVariableDeclaration& vardecl);
//@}
// DQ (6/7/2012): Unclear where this function should go...
bool hasTemplateSyntax( const SgName & name );
#if 0
//------------------------AST dump, stringify-----------------------------
//------------------------------------------------------------------------
std::string buildOperatorString ( SgNode* astNode ); //transformationSupport.h
// do we need these?
std::string dump_node(const SgNode* astNode);
std::string dump_tree(const SgNode* astNode);
// or a friendly version of unparseToString(), as a member function
std::string SgNode::toString(bool asSubTree=true); // dump node or subtree
//----------------------------AST comparison------------------------------
//------------------------------------------------------------------------
// How to get generic functions for comparison?
bool isNodeEqual(SgNode* node1, SgNode* node2); //?
bool isTreeEqual(SgNode* tree1, SgNode* tree2);
//! Are two expressions equal (using a deep comparison)?
bool expressionTreeEqual(SgExpression*, SgExpression*);
//! Are corresponding expressions in two lists equal (using a deep comparison)?
bool expressionTreeEqualStar(const SgExpressionPtrList&,
const SgExpressionPtrList&);
//----------------------AST verification/repair---------------------------
//------------------------------------------------------------------------
// sanity check of AST subtree, any suggestions?
// TODO
verifySgNode(SgNode* node, bool subTree=true);
//src/midend/astDiagnostics/AstConsistencyTests.h
// AstTests::runAllTests(SgProject * )
//src/midend/astUtil/astInterface/AstInterface.h.C
//FixSgProject(SgProject &project)
//FixSgTree(SgNode* r)
//src/frontend/SageIII/astPostProcessing
//AstPostProcessing(SgNode * node)
//--------------------------AST modification------------------------------
//------------------------------------------------------------------------
// any operations changing AST tree, including
// insert, copy, delete(remove), replace
// insert before or after some point, argument list is consistent with LowLevelRewrite
void insertAst(SgNode* targetPosition, SgNode* newNode, bool insertBefore=true);
// previous examples
//void myStatementInsert(SgStatement* target,...)
// void AstInterfaceBase::InsertStmt(AstNodePtr const & orig, AstNodePtr const &n, bool insertbefore, bool extractfromBasicBlock)
// copy
// copy children of one basic block to another basic block
//void appendStatementCopy (const SgBasicBlock* a, SgBasicBlock* b);
void copyStatements (const SgBasicBlock* src, SgBasicBlock* dst);
// delete (remove) a node or a whole subtree
void removeSgNode(SgNode* targetNode); // need this?
void removeSgNodeTree(SgNode* subtree); // need this?
void removeStatement( SgStatement* targetStmt);
//Move = delete + insert
void moveAst (SgNode* src, SgNode* target); // need this?
// similar to
void moveStatements (SgBasicBlock* src, SgBasicBlock* target);
// replace= delete old + insert new (via building or copying)
// DQ (1/25/2010): This does not appear to exist as a definition anywhere in ROSE.
// void replaceAst(SgNode* oldNode, SgNode* newNode);
//void replaceChild(SgNode* parent, SgNode* from, SgNode* to);
//bool AstInterface::ReplaceAst( const AstNodePtr& orig, const AstNodePtr& n)
//--------------------------AST transformations---------------------------
//------------------------------------------------------------------------
// Advanced AST modifications through basic AST modifications
// Might not be included in AST utility list, but listed here for the record.
// extract statements/content from a scope
void flattenBlocks(SgNode* n);
//src/midend/astInlining/inlinerSupport.h
void renameVariables(SgNode* n);
void renameLabels(SgNode* n, SgFunctionDefinition* enclosingFunctionDefinition);
void simpleCopyAndConstantPropagation(SgNode* top);
void changeAllMembersToPublic(SgNode* n);
void removeVariableDeclaration(SgInitializedName* initname);
//! Convert something like "int a = foo();" into "int a; a = foo();"
SgAssignOp* convertInitializerIntoAssignment(SgAssignInitializer* init);
//! Rewrites a while or for loop so that the official test is changed to
//! "true" and what had previously been the test is now an if-break
//! combination (with an inverted condition) at the beginning of the loop
//! body
void pushTestIntoBody(LoopStatement* loopStmt);
//programTransformation/finiteDifferencing/finiteDifferencing.h
//! Move variables declared in a for statement to just outside that statement.
void moveForDeclaredVariables(SgNode* root);
//------------------------ Is/Has functions ------------------------------
//------------------------------------------------------------------------
// misc. boolean functions
// some of them could moved to SgXXX class as a member function
bool isOverloaded (SgFunctionDeclaration * functionDeclaration);
bool isSwitchCond (const SgStatement* s);
bool isIfCond (const SgStatement* s);
bool isWhileCond (const SgStatement* s);
bool isStdNamespace (const SgScopeStatement* scope);
bool isTemplateInst (const SgDeclarationStatement* decl);
bool isCtor (const SgFunctionDeclaration* func);
bool isDtor (const SgFunctionDeclaration* func);
// src/midend/astInlining/typeTraits.h
bool hasTrivialDestructor(SgType* t);
ROSE_DLL_API bool isNonconstReference(SgType* t);
ROSE_DLL_API bool isReferenceType(SgType* t);
// generic ones, or move to the SgXXX class as a member function
bool isConst(SgNode* node); // const type, variable, function, etc.
// .... and more
bool isConstType (const SgType* type);
bool isConstFunction (const SgFunctionDeclaration* decl);
bool isMemberVariable(const SgInitializedName & var);
//bool isMemberVariable(const SgNode& in);
bool isPrototypeInScope (SgScopeStatement * scope,
SgFunctionDeclaration * functionDeclaration,
SgDeclarationStatement * startingAtDeclaration);
bool MayRedefined(SgExpression* expr, SgNode* root);
// bool isPotentiallyModified(SgExpression* expr, SgNode* root); // inlinderSupport.h
bool hasAddressTaken(SgExpression* expr, SgNode* root);
//src/midend/astInlining/inlinerSupport.C
// can also classified as topdown search
bool containsVariableReference(SgNode* root, SgInitializedName* var);
bool isDeclarationOf(SgVariableDeclaration* decl, SgInitializedName* var);
bool isPotentiallyModifiedDuringLifeOf(SgBasicBlock* sc,
SgInitializedName* toCheck,
SgInitializedName* lifetime)
//src/midend/programTransformation/partialRedundancyElimination/pre.h
bool anyOfListPotentiallyModifiedIn(const std::vector<SgVariableSymbol*>& syms, SgNode* n);
//------------------------ loop handling ---------------------------------
//------------------------------------------------------------------------
//get and set loop control expressions
// 0: init expr, 1: condition expr, 2: stride expr
SgExpression* getForLoopTripleValues(int valuetype,SgForStatement* forstmt );
int setForLoopTripleValues(int valuetype,SgForStatement* forstmt, SgExpression* exp);
bool isLoopIndexVarRef(SgForStatement* forstmt, SgVarRefExp *varref);
SgInitializedName * getLoopIndexVar(SgForStatement* forstmt);
//------------------------expressions-------------------------------------
//------------------------------------------------------------------------
//src/midend/programTransformation/partialRedundancyElimination/pre.h
int countComputationsOfExpressionIn(SgExpression* expr, SgNode* root);
//src/midend/astInlining/replaceExpressionWithStatement.h
void replaceAssignmentStmtWithStatement(SgExprStatement* from, StatementGenerator* to);
void replaceSubexpressionWithStatement(SgExpression* from,
StatementGenerator* to);
SgExpression* getRootOfExpression(SgExpression* n);
//--------------------------preprocessing info. -------------------------
//------------------------------------------------------------------------
//! Removes all preprocessing information at a given position.
void cutPreprocInfo (SgBasicBlock* b,
PreprocessingInfo::RelativePositionType pos,
AttachedPreprocessingInfoType& save_buf);
//! Pastes preprocessing information at the front of a statement.
void pastePreprocInfoFront (AttachedPreprocessingInfoType& save_buf,
SgStatement* s);
//! Pastes preprocessing information at the back of a statement.
void pastePreprocInfoBack (AttachedPreprocessingInfoType& save_buf,
SgStatement* s);
/*!
* \brief Moves 'before' preprocessing information.
* Moves all preprocessing information attached 'before' the source
* statement to the front of the destination statement.
*/
// a generic one for all
/// void movePreprocessingInfo(src, dest, RelativePositionType);
void moveBeforePreprocInfo (SgStatement* src, SgStatement* dest);
void moveInsidePreprocInfo (SgBasicBlock* src, SgBasicBlock* dest);
void moveAfterPreprocInfo (SgStatement* src, SgStatement* dest);
//--------------------------------operator--------------------------------
//------------------------------------------------------------------------
from transformationSupport.h, not sure if they should be included here
/* return enum code for SAGE operators */
operatorCodeType classifyOverloadedOperator(); // transformationSupport.h
/*! \brief generates a source code string from operator name.
This function returns a string representing the elementwise operator (for primative types)
that would be match that associated with the overloaded operator for a user-defined
abstractions (e.g. identifyOperator("operator+()") returns "+").
*/
std::string stringifyOperator (std::string name);
//--------------------------------macro ----------------------------------
//------------------------------------------------------------------------
std::string buildMacro ( std::string s ); //transformationSupport.h
//--------------------------------access functions---------------------------
//----------------------------------get/set sth.-----------------------------
// several categories:
* get/set a direct child/grandchild node or fields
* get/set a property flag value
* get a descendent child node using preorder searching
* get an ancestor node using bottomup/reverse searching
// SgName or string?
std::string getFunctionName (SgFunctionCallExp* functionCallExp);
std::string getFunctionTypeName ( SgFunctionCallExp* functionCallExpression );
// do we need them anymore? or existing member functions are enought?
// a generic one:
std::string get_name (const SgNode* node);
std::string get_name (const SgDeclarationStatement * declaration);
// get/set some property: should be moved to SgXXX as an inherent member function?
// access modifier
void setExtern (SgFunctionDeclartion*)
void clearExtern()
// similarly for other declarations and other properties
void setExtern (SgVariableDeclaration*)
void setPublic()
void setPrivate()
#endif
// DQ (1/23/2013): Added support for generated a set of source sequence entries.
std::set<unsigned int> collectSourceSequenceNumbers( SgNode* astNode );
//--------------------------------Type Traits (C++)---------------------------
bool HasNoThrowAssign(const SgType * const inputType);
bool HasNoThrowCopy(const SgType * const inputType);
bool HasNoThrowConstructor(const SgType * const inputType);
bool HasTrivialAssign(const SgType * const inputType);
bool HasTrivialCopy(const SgType * const inputType);
bool HasTrivialConstructor(const SgType * const inputType);
bool HasTrivialDestructor(const SgType * const inputType);
bool HasVirtualDestructor(const SgType * const inputType);
bool IsBaseOf(const SgType * const inputBaseType, const SgType * const inputDerivedType);
bool IsAbstract(const SgType * const inputType);
bool IsClass(const SgType * const inputType);
bool IsEmpty(const SgType * const inputType);
bool IsEnum(const SgType * const inputType);
bool IsPod(const SgType * const inputType);
bool IsPolymorphic(const SgType * const inputType);
bool IsStandardLayout(const SgType * const inputType);
bool IsLiteralType(const SgType * const inputType);
bool IsTrivial(const SgType * const inputType);
bool IsUnion(const SgType * const inputType);
SgType * UnderlyingType(SgType *type);
// DQ (3/2/2014): Added a new interface function (used in the snippet insertion support).
// void supportForInitializedNameLists ( SgScopeStatement* scope, SgInitializedNamePtrList & variableList );
// DQ (3/4/2014): Added support for testing two trees for equivalents using the AST iterators.
bool isStructurallyEquivalentAST( SgNode* tree1, SgNode* tree2 );
// JP (10/14/24): Moved code to evaluate a const integer expression (like in array size definitions) to SageInterface
/*! The datastructure is used as the return type for SageInterface::evaluateConstIntegerExpression(). One needs to always check whether hasValue_ is true before accessing value_ */
struct const_int_expr_t {
size_t value_;
bool hasValue_;
};
/*! \brief The function tries to evaluate const integer expressions (such as are used in array dimension sizes). It follows variable symbols, and requires constness. */
struct const_int_expr_t evaluateConstIntegerExpression(SgExpression *expr);
// JP (9/17/14): Added function to test whether two SgType* are equivalent or not
bool checkTypesAreEqual(SgType *typeA, SgType *typeB);
//--------------------------------Java interface functions ---------------------
#ifdef ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
ROSE_DLL_API std::string getTempDirectory(SgProject *project);
ROSE_DLL_API void destroyTempDirectory(std::string);
ROSE_DLL_API SgFile *processFile(SgProject *, std::string, bool unparse = false);
ROSE_DLL_API std::string preprocessPackage(SgProject *, std::string);
ROSE_DLL_API std::string preprocessImport(SgProject *, std::string);
ROSE_DLL_API SgFile* preprocessCompilationUnit(SgProject *, std::string, std::string, bool unparse = true);
ROSE_DLL_API SgClassDefinition *findJavaPackage(SgScopeStatement *, std::string);
ROSE_DLL_API SgClassDefinition *findOrInsertJavaPackage(SgProject *, std::string, bool create_directory = false);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassDefinition *package_definition, std::string);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, std::string, std::string);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassType *);
ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassDefinition *);
ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassType *);
#endif // ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
// DQ (8/31/2016): Making this a template function so that we can have it work with user defined filters.
//! This function detects template instantiations that are relevant when filters are used.
/*!
EDG normalizes some in-class template functions and member functions to be redefined outside of a class. this causes the associated template instantiations
to be declared outside of the class, and to be marked as compiler generated (since the compiler generated form outside of the class declaration).
ROSE captures the function definitions, but in the new location (defined outside of the class declaration). This can confuse some simple tests
for template instantiations that are a part of definitions in a file, thus we have this function to detect this specific normalization.
*/
template < class T >
bool isTemplateInstantiationFromTemplateDeclarationSatisfyingFilter (SgFunctionDeclaration* function, T* filter )
   {
  // DQ (9/1/2016): This function is called in the Call graph generation to avoid filtering out EDG normalized
  // function template instantiations (which come from normalized template functions and member functions).
  // Note that because of the EDG normalization the member function is moved outside of the class, and
  // thus marked as compiler generated. However the template instantiations are always marked as compiler
  // generated (if not specializations) and so we want to include a template instantiation that is marked
  // as compiler generated, but is from a template declaration that satisfied a specific user defined filter.
  // The complexity of this detection is isolated here, but knowing that it must be called is more complex.
  // This function is called in the CG.C file of tests/nonsmoke/functional/roseTests/programAnalysisTests/testCallGraphAnalysis.
     bool retval = false;

#define DEBUG_TEMPLATE_NORMALIZATION_DETECTION 0

#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
     printf ("In isNormalizedTemplateInstantiation(): function = %p = %s = %s \n",function,function->class_name().c_str(),function->get_name().str());
#endif

  // Test for this to be a template instantiation (in which case it was marked as
  // compiler generated but we may want to allow it to be used in the call graph,
  // if its template was defined in the current directory).
     SgTemplateInstantiationFunctionDecl* templateInstantiationFunction = isSgTemplateInstantiationFunctionDecl(function);
     SgTemplateInstantiationMemberFunctionDecl* templateInstantiationMemberFunction = isSgTemplateInstantiationMemberFunctionDecl(function);

     if (templateInstantiationFunction != NULL)
        {
       // When the defining function has been normalized by EDG, only the non-defining declaration will have a source position.
          templateInstantiationFunction = isSgTemplateInstantiationFunctionDecl(templateInstantiationFunction->get_firstNondefiningDeclaration());

       // Bugfix: the result of the cast above may be NULL (e.g. if the first nondefining declaration is
       // not an instantiation); guard before dereferencing.  retval stays false in that case.
          if (templateInstantiationFunction != NULL)
             {
               SgTemplateFunctionDeclaration* templateFunctionDeclaration = templateInstantiationFunction->get_templateDeclaration();
               if (templateFunctionDeclaration != NULL)
                  {
                    retval = filter->operator()(templateFunctionDeclaration);
                  }
                 else
                  {
                 // Assume false.
                  }
             }
#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
          printf (" --- case of templateInstantiationFunction: retval = %s \n",retval ? "true" : "false");
#endif
        }
       else
        {
          if (templateInstantiationMemberFunction != NULL)
             {
            // When the defining function has been normalized by EDG, only the non-defining declaration will have a source position.
               templateInstantiationMemberFunction = isSgTemplateInstantiationMemberFunctionDecl(templateInstantiationMemberFunction->get_firstNondefiningDeclaration());

            // Bugfix: same NULL guard as for the non-member case above.
               if (templateInstantiationMemberFunction != NULL)
                  {
                    SgTemplateMemberFunctionDeclaration* templateMemberFunctionDeclaration = templateInstantiationMemberFunction->get_templateDeclaration();
                    if (templateMemberFunctionDeclaration != NULL)
                       {
                         retval = filter->operator()(templateMemberFunctionDeclaration);
                       }
                      else
                       {
                      // Assume false.
                       }
                  }
#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
               printf (" --- case of templateInstantiationMemberFunction: retval = %s \n",retval ? "true" : "false");
#endif
             }
        }

  // Bugfix: do not leak a header-defined macro into every including translation unit.
#undef DEBUG_TEMPLATE_NORMALIZATION_DETECTION

     return retval;
   }
void detectCycleInType(SgType * type, const std::string & from);
}// end of namespace
#endif
|
grid_basis.c | /*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "cint.h"
#include "config.h"
// 128s42p21d12f8g6h4i3j
#define NCTR_CART 128
// 72s24p14d10f8g6h5i4j
#define NCTR_SPH 72
#define NPRIMAX 64
#define BLKSIZE 96
#define EXPCUTOFF 50 // 1e-22
#define MIN(X,Y) ((X)<(Y)?(X):(Y))
#define MAX(X,Y) ((X)>(Y)?(X):(Y))
/* Mark which blocks of grid points receive a non-negligible contribution
 * from each basis shell.
 *
 * non0table has nblk*nbas entries; entry [ib*nbas+bas_id] is set to 1 if
 * any grid point in block ib lies close enough to shell bas_id that
 * exp(-alpha*r^2) times the largest contraction coefficient exceeds the
 * EXPCUTOFF threshold, and 0 otherwise. */
void VXCnr_ao_screen(signed char *non0table, double *coord,
                     int ngrids, int blksize,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        const int nblk = (ngrids+blksize-1) / blksize;
        double logcoeff[NPRIMAX];

        memset(non0table, 0, sizeof(signed char) * nblk*nbas);

        for (int bas_id = 0; bas_id < nbas; bas_id++) {
                const int *shell = bas + bas_id * BAS_SLOTS;
                const int np = shell[NPRIM_OF];
                const int nc = shell[NCTR_OF];
                const double *p_exp  = env + shell[PTR_EXP];
                const double *pcoeff = env + shell[PTR_COEFF];
                const double *ratm   = env + atm[shell[ATOM_OF]*ATM_SLOTS+PTR_COORD];

                /* log of the largest |contraction coefficient| of each primitive */
                for (int j = 0; j < np; j++) {
                        double maxc = 0;
                        for (int i = 0; i < nc; i++) {
                                maxc = MAX(maxc, fabs(pcoeff[i*np+j]));
                        }
                        logcoeff[j] = log(maxc);
                }

                for (int ib = 0; ib < nblk; ib++) {
                        const double *pcoord = coord + (size_t)ib * blksize * 3;
                        const int ngrid_blk = MIN(ngrids - ib*blksize, blksize);
                        int significant = 0;
                        /* stop scanning the block as soon as one grid point passes */
                        for (int i = 0; i < ngrid_blk && !significant; i++) {
                                double dx = pcoord[i*3+0] - ratm[0];
                                double dy = pcoord[i*3+1] - ratm[1];
                                double dz = pcoord[i*3+2] - ratm[2];
                                double rr = dx*dx + dy*dy + dz*dz;
                                for (int j = 0; j < np; j++) {
                                        /* alpha*r^2 - log(max|c|) < cutoff  <=>
                                         * max|c| * exp(-alpha*r^2) > exp(-cutoff) */
                                        if (p_exp[j]*rr - logcoeff[j] < EXPCUTOFF) {
                                                significant = 1;
                                                break;
                                        }
                                }
                        }
                        if (significant) {
                                non0table[ib*nbas+bas_id] = 1;
                        }
                }
        }
}
/* Generate fuzzy-partition weights for molecular integration grids.
 *
 * out         : natm*ngrids array; on exit out[i*ngrids+n] is the (unnormalized)
 *               weight of grid point n with respect to atom i.
 * coords      : ngrids x 3 grid point coordinates.
 * atm_coords  : natm x 3 atomic coordinates.
 * radii_table : optional natm x natm atomic-size adjustments (may be NULL).
 *
 * For every atom pair the elliptical coordinate g = (r_i - r_j)/R_ij is
 * smoothed by three applications of s(g) = (3 - g^2)*g/2 and converted to the
 * pair of cell functions .5 -/+ g which multiply into each atom's weight
 * (Becke-style fuzzy partition). */
void VXCgen_grid(double *out, double *coords, double *atm_coords,
                 double *radii_table, int natm, int ngrids)
{
        int i, j, n;
        double dx, dy, dz;
        /* cast malloc results so the file also compiles as C++ */
        double *grid_dist = (double *)malloc(sizeof(double) * natm*ngrids);

        /* distance from every grid point to every atom */
        for (i = 0; i < natm; i++) {
                for (n = 0; n < ngrids; n++) {
                        dx = coords[n*3+0] - atm_coords[i*3+0];
                        dy = coords[n*3+1] - atm_coords[i*3+1];
                        dz = coords[n*3+2] - atm_coords[i*3+2];
                        grid_dist[i*ngrids+n] = sqrt(dx*dx + dy*dy + dz*dz);
                }
        }

        for (n = 0; n < natm*ngrids; n++) {
                out[n] = 1;
        }

#pragma omp parallel default(none) \
        shared(out, grid_dist, atm_coords, radii_table, natm, ngrids) \
        private(i, j, n, dx, dy, dz)
{
        /* per-thread product accumulator; merged into out under the critical
         * section below */
        double *buf = (double *)malloc(sizeof(double) * natm*ngrids);
        for (i = 0; i < natm*ngrids; i++) {
                buf[i] = 1;
        }
        int ij;
        double fac;
        /* Bugfix: was a VLA "double g[ngrids]" -- a large grid could overflow
         * the (per-thread) stack, so allocate on the heap instead. */
        double *g = (double *)malloc(sizeof(double) * ngrids);
#pragma omp for nowait schedule(static)
        for (ij = 0; ij < natm*natm; ij++) {
                i = ij / natm;
                j = ij % natm;
                if (i <= j) {
                        /* each unordered pair handled once, with i > j */
                        continue;
                }

                dx = atm_coords[i*3+0] - atm_coords[j*3+0];
                dy = atm_coords[i*3+1] - atm_coords[j*3+1];
                dz = atm_coords[i*3+2] - atm_coords[j*3+2];
                fac = 1 / sqrt(dx*dx + dy*dy + dz*dz);

                /* elliptical coordinate of each grid point for pair (i,j) */
                for (n = 0; n < ngrids; n++) {
                        g[n] = grid_dist[i*ngrids+n] - grid_dist[j*ngrids+n];
                        g[n] *= fac;
                }
                if (radii_table != NULL) {
                        /* atomic-size adjustment */
                        fac = radii_table[i*natm+j];
                        for (n = 0; n < ngrids; n++) {
                                g[n] += fac * (1 - g[n]*g[n]);
                        }
                }
                /* three iterations of the smoothing polynomial s(g)=(3-g^2)g/2 */
                for (n = 0; n < ngrids; n++) {
                        g[n] = (3 - g[n]*g[n]) * g[n] * .5;
                }
                for (n = 0; n < ngrids; n++) {
                        g[n] = (3 - g[n]*g[n]) * g[n] * .5;
                }
                for (n = 0; n < ngrids; n++) {
                        g[n] = (3 - g[n]*g[n]) * g[n] * .5;
                        g[n] *= .5;
                }
                for (n = 0; n < ngrids; n++) {
                        buf[i*ngrids+n] *= .5 - g[n];
                        buf[j*ngrids+n] *= .5 + g[n];
                }
        }
#pragma omp critical
        for (i = 0; i < natm*ngrids; i++) {
                out[i] *= buf[i];
        }
        free(g);
        free(buf);
}
        free(grid_dist);
}
|
mesonfield_impl.h | #ifndef MESON_FIELD_IMPL
#define MESON_FIELD_IMPL
#include<alg/a2a/mult_impl.h>
#include<alg/a2a/mult_vMv_split.h>
#include<alg/a2a/mult_vMv_split_grid.h>
#include<alg/a2a/mult_vMv_impl.h>
#include<alg/a2a/mult_vv_impl.h>
#include<alg/a2a/mesonfield_io.h>
#include<alg/a2a/mesonfield_compute_impl.h>
//Add 'with' element-wise into this meson field. Both fields must share the same
//mode counts and dilution parameters; otherwise ERR.General is invoked.
//If 'parallel' is true the accumulation loop is OpenMP-threaded.
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR>
void A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR>::plus_equals(const A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR> &with, const bool parallel){
  if(nmodes_l != with.nmodes_l || nmodes_r != with.nmodes_r ||
     !lindexdilution.paramsEqual(with.lindexdilution) || !rindexdilution.paramsEqual(with.rindexdilution) ){
    ERR.General("A2AmesonField","plus_equals(..)","Second meson field must have the same underlying parameters\n");
  }
  if(parallel){
    //Bugfix: was "#pragma omp_parallel for", which is not an OpenMP directive and
    //was silently ignored, making the "parallel" path run serially.
#pragma omp parallel for
    for(int i=0;i<fsize;i++) mf[i] += with.mf[i];
  }else{
    for(int i=0;i<fsize;i++) mf[i] += with.mf[i];
  }
}
//Scale every element of this meson field by the scalar f.
//If 'parallel' is true the loop is OpenMP-threaded.
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR>
void A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR>::times_equals(const ScalarComplexType f,const bool parallel){
  if(parallel){
    //Bugfix: was "#pragma omp_parallel for", which is not an OpenMP directive and
    //was silently ignored, making the "parallel" path run serially.
#pragma omp parallel for
    for(int i=0;i<fsize;i++) mf[i] *= f;
  }else{
    for(int i=0;i<fsize;i++) mf[i] *= f;
  }
}
//! Arithmetic mean of two complex numbers: (a+b)/2.
template<typename T>
inline std::complex<T> complexAvg(const std::complex<T>&a, const std::complex<T> &b){
  const std::complex<T> sum = a + b;
  return sum / T(2.0);
}
//Replace this meson field with the average of this and a second field, 'with'
//Replace this meson field with the element-wise average of this and a second
//field 'with'. Both fields must share the same mode counts and dilution
//parameters; otherwise ERR.General is invoked. If 'parallel' is true the loop
//is OpenMP-threaded.
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR>
void A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR>::average(const A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR> &with, const bool parallel){
  if(nmodes_l != with.nmodes_l || nmodes_r != with.nmodes_r ||
     !lindexdilution.paramsEqual(with.lindexdilution) || !rindexdilution.paramsEqual(with.rindexdilution) ){
    ERR.General("A2AmesonField","average(..)","Second meson field must have the same underlying parameters\n");
  }
  if(parallel){
    //Bugfix: was "#pragma omp_parallel for", which is not an OpenMP directive and
    //was silently ignored, making the "parallel" path run serially.
#pragma omp parallel for
    for(int i=0;i<fsize;i++) mf[i] = complexAvg(mf[i],with.mf[i]);//(mf[i] + with.mf[i])/2.0;
  }else{
    for(int i=0;i<fsize;i++) mf[i] = complexAvg(mf[i],with.mf[i]);//(mf[i] + with.mf[i])/2.0;
  }
}
//Reorder the rows so that all the elements in idx_map are sequential. Indices not in map are ignored. Use at your own risk
//Reorder the rows so that all the elements in idx_map are sequential: output
//row i is input row idx_map[i]. Rows not referenced by the map are ignored.
//Use at your own risk.
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR>
void A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR>::rowReorder(A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR> &into, const int idx_map[], int map_size, bool parallel) const{
  into.setup(lindexdilution, rindexdilution, tl, tr);
  if(parallel){
#pragma omp parallel for
    for(int i=0;i<map_size;i++){
      const int src_row = idx_map[i];
      for(int j=0;j<nmodes_r;j++)
        into(i,j) = (*this)(src_row,j);
    }
  }else{
    for(int i=0;i<map_size;i++){
      const int src_row = idx_map[i];
      for(int j=0;j<nmodes_r;j++)
        into(i,j) = (*this)(src_row,j);
    }
  }
}
//Reorder the columns so that all the elements in idx_map are sequential:
//output column j is input column idx_map[j]. Columns not referenced by the
//map are ignored.
//Bugfix: the original defined a DOIT helper macro and never #undef'd it,
//leaking the macro into every translation unit that includes this header;
//the macro is eliminated entirely here.
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR>
void A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR>::colReorder(A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR> &into, const int idx_map[], int map_size, bool parallel) const{
  into.setup(lindexdilution, rindexdilution, tl, tr);
  if(parallel){
#pragma omp parallel for
    for(int i=0;i<nmodes_l;i++){
      for(int j=0;j<map_size;j++){
        const int src_col = idx_map[j];
        into(i,j) = (*this)(i,src_col);
      }
    }
  }else{
    for(int i=0;i<nmodes_l;i++){
      for(int j=0;j<map_size;j++){
        const int src_col = idx_map[j];
        into(i,j) = (*this)(i,src_col);
      }
    }
  }
}
//Do a column reorder but where we pack the row indices to exclude those not used (as indicated by input bool array)
//Output as a GSL matrix
//Column-reorder according to idx_map while dropping rows whose rowidx_used
//entry is false, writing the packed result into a GSL complex matrix.
//If 'reuse' is non-NULL its header fields (size1/size2/tda) are overwritten to
//describe the packed shape and its storage is reused -- this assumes the reuse
//matrix's allocation is large enough for nrows_used*map_size elements, which
//is NOT checked here (TODO confirm callers guarantee this). Otherwise a fresh
//matrix is allocated; the caller owns the returned matrix either way.
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR>
typename gsl_wrapper<typename mf_Policies::ScalarComplexType::value_type>::matrix_complex * A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR>::GSLpackedColReorder(const int idx_map[], int map_size, bool rowidx_used[], typename gsl_wrapper<typename ScalarComplexType::value_type>::matrix_complex *reuse ) const{
typedef gsl_wrapper<typename ScalarComplexType::value_type> gw;
//The memcpy below relies on the GSL complex type and ScalarComplexType having
//identical layout sizes
assert(sizeof(typename gw::complex) == sizeof(ScalarComplexType));
int rows = nmodes_l;
int cols = nmodes_r;
//Count rows that survive the packing
int nrows_used = 0;
for(int i_full=0;i_full<rows;i_full++) if(rowidx_used[i_full]) nrows_used++;
typename gw::matrix_complex *M_packed;
if(reuse!=NULL){
M_packed = reuse;
//Re-shape the reused matrix header in place (no reallocation)
M_packed->size1 = nrows_used;
M_packed->size2 = M_packed->tda = map_size;
}else M_packed = gw::matrix_complex_alloc(nrows_used,map_size);
//Look for contiguous blocks in the idx_map we can take advantage of
std::vector<std::pair<int,int> > blocks;
find_contiguous_blocks(blocks,idx_map,map_size);
//Copy each used row, one contiguous (start,length) block at a time
int i_packed = 0;
for(int i_full=0;i_full<rows;i_full++){
if(rowidx_used[i_full]){
ScalarComplexType const* mf_row_base = mf + nmodes_r*i_full; //meson field are row major so columns are contiguous
typename gw::complex* row_base = gw::matrix_complex_ptr(M_packed,i_packed,0); //GSL matrix are also row major
for(int b=0;b<blocks.size();b++){
ScalarComplexType const* block_ptr = mf_row_base + idx_map[blocks[b].first];
memcpy((void*)row_base,(void*)block_ptr,blocks[b].second*sizeof(ScalarComplexType));
row_base += blocks[b].second;
}
i_packed++;
}
}
return M_packed;
}
#ifdef USE_GRID
//Do a column reorder but where we pack the row indices to exclude those not used (as indicated by input bool array)
//Output to a linearized matrix of Grid SIMD vectors where we have splatted the scalar onto all SIMD lanes
//Does not set the size of the output vector, allowing reuse of a previously allocated vector providing it's large enough
//Column-reorder according to idx_map while dropping rows whose rowidx_used
//entry is false; each surviving scalar element is splatted (via vsplat) onto
//all SIMD lanes of a Grid vector-complex element of the output.
//If do_resize is false the caller must ensure 'into' already holds at least
//nrows_used*map_size elements.
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR>
void A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR>::splatPackedColReorder(Grid::Vector<typename mf_Policies::ComplexType> &into, const int idx_map[], int map_size, bool rowidx_used[], bool do_resize) const{
typedef typename mf_Policies::ComplexType VectorComplexType;
int full_rows = nmodes_l;
int full_cols = nmodes_r;
//Count rows that survive the packing
int nrows_used = 0;
for(int i_full=0;i_full<full_rows;i_full++) if(rowidx_used[i_full]) nrows_used++;
if(do_resize) into.resize(nrows_used*map_size);
//Look for contiguous blocks in the idx_map we can take advantage of
std::vector<std::pair<int,int> > blocks;
find_contiguous_blocks(blocks,idx_map,map_size);
//Copy each used row block-by-block, splatting scalars onto SIMD lanes
int i_packed = 0;
for(int i_full=0;i_full<full_rows;i_full++){
if(rowidx_used[i_full]){
ScalarComplexType const* mf_row_base = mf + nmodes_r*i_full;
VectorComplexType* row_base = &into[map_size*i_packed];
for(int b=0;b<blocks.size();b++){
ScalarComplexType const* block_ptr = mf_row_base + idx_map[blocks[b].first];
for(int bb=0;bb<blocks[b].second;bb++)
vsplat(row_base[bb],block_ptr[bb]);
row_base += blocks[b].second;
}
i_packed++;
}
}
}
//Column-reorder according to idx_map while dropping rows whose rowidx_used
//entry is false, writing the packed scalar elements into a linearized matrix.
//If do_resize is false the caller must ensure 'into' already holds at least
//nrows_used*map_size elements.
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR>
void A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR>::scalarPackedColReorder(Grid::Vector<typename mf_Policies::ScalarComplexType> &into, const int idx_map[], int map_size, bool rowidx_used[], bool do_resize) const{
  const int full_rows = nmodes_l;

  //Count the rows that survive the packing
  int nrows_used = 0;
  for(int r=0;r<full_rows;r++)
    if(rowidx_used[r]) nrows_used++;

  if(do_resize) into.resize(nrows_used*map_size);

  //Contiguous (start,length) runs in idx_map let us copy several columns per inner loop
  std::vector<std::pair<int,int> > blocks;
  find_contiguous_blocks(blocks,idx_map,map_size);

  int packed_row = 0;
  for(int r=0;r<full_rows;r++){
    if(!rowidx_used[r]) continue;

    ScalarComplexType const* src_row = mf + nmodes_r*r; //rows are contiguous (row-major)
    ScalarComplexType* dst = &into[map_size*packed_row];
    for(int b=0;b<blocks.size();b++){
      ScalarComplexType const* src = src_row + idx_map[blocks[b].first];
      for(int k=0;k<blocks[b].second;k++)
        *dst++ = src[k];
    }
    packed_row++;
  }
}
#endif
//Write the transpose of this meson field into 'into' (row/column dilutions and
//timeslices are swapped accordingly). In-place transposition is not supported.
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR>
void A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR>::transpose(A2AmesonField<mf_Policies,A2AfieldR,A2AfieldL> &into) const{
  assert( (void*)this != (void*)&into ); //source and destination must be distinct objects
  into.setup(rindexdilution, lindexdilution, tr, tl);
#pragma omp parallel for
  for(int row=0;row<nmodes_l;row++)
    for(int col=0;col<nmodes_r;col++)
      into(col,row) = (*this)(row,col);
}
//Compute l^ij(t1,t2) r^ji(t3,t4)
//Threaded but *node local*
template<typename mf_Policies,
	 template <typename> class lA2AfieldL, template <typename> class lA2AfieldR,
	 template <typename> class rA2AfieldL, template <typename> class rA2AfieldR
	 >
typename mf_Policies::ScalarComplexType trace(const A2AmesonField<mf_Policies,lA2AfieldL,lA2AfieldR> &l, const A2AmesonField<mf_Policies,rA2AfieldL,rA2AfieldR> &r){
  //Check the indices match
  if(! l.getRowParams().paramsEqual( r.getColParams() ) || ! l.getColParams().paramsEqual( r.getRowParams() ) )
    ERR.General("","trace(const A2AmesonField<mf_Policies,lA2AfieldL,lA2AfieldR> &, const A2AmesonField<mf_Policies,rA2AfieldL,rA2AfieldR> &)","Illegal matrix product: underlying vector parameters must match\n");

  typedef typename mf_Policies::ScalarComplexType ScalarComplexType;
  ScalarComplexType into(0,0);

  typedef typename A2AmesonField<mf_Policies,lA2AfieldL,lA2AfieldR>::LeftDilutionType DilType0;
  typedef typename A2AmesonField<mf_Policies,lA2AfieldL,lA2AfieldR>::RightDilutionType DilType1;
  typedef typename A2AmesonField<mf_Policies,rA2AfieldL,rA2AfieldR>::LeftDilutionType DilType2;
  typedef typename A2AmesonField<mf_Policies,rA2AfieldL,rA2AfieldR>::RightDilutionType DilType3;

  ModeContractionIndices<DilType0,DilType3> i_ind(l.getRowParams());
  ModeContractionIndices<DilType1,DilType2> j_ind(r.getRowParams());

  int times[4] = { l.getRowTimeslice(), l.getColTimeslice(), r.getRowTimeslice(), r.getColTimeslice() };

  //W * W is only non-zero when the timeslice upon which we evaluate them are equal
  const int n_threads = omp_get_max_threads();
  //One accumulator per thread to avoid a shared write in the parallel loop;
  //combined serially below.
  //Bugfix: the initializer was "(0.,0.)" -- that is the comma operator, i.e. the
  //double 0.0, not a complex literal. The value happened to be the same but the
  //intent is now explicit.
  std::vector<ScalarComplexType> ret_vec(n_threads, ScalarComplexType(0,0));

  modeIndexSet lip; lip.time = times[0];
  modeIndexSet rip; rip.time = times[3];
  modeIndexSet ljp; ljp.time = times[1];
  modeIndexSet rjp; rjp.time = times[2];

  const int ni = i_ind.getNindices(lip,rip); //how many indices to loop over
  const int nj = j_ind.getNindices(ljp,rjp);

#ifndef MEMTEST_MODE
#pragma omp parallel for
  for(int i = 0; i < ni; i++){
    const int id = omp_get_thread_num();
    const int li = i_ind.getLeftIndex(i,lip,rip);
    const int ri = i_ind.getRightIndex(i,lip,rip);
    for(int j = 0; j < nj; j++){
      const int lj = j_ind.getLeftIndex(j,ljp,rjp);
      const int rj = j_ind.getRightIndex(j,ljp,rjp);
      ret_vec[id] += l(li,lj) * r(rj,ri);
    }
  }
  //Serial reduction over the per-thread partial sums
  for(int i=0;i<n_threads;i++) into += ret_vec[i];
#endif
  return into;
}
//Compute l^ij(t1,t2) r^ji(t3,t4) for all t1, t4 and place into matrix element t1,t4
//This is both threaded and distributed over nodes
template<typename mf_Policies,
         template <typename> class lA2AfieldL, template <typename> class lA2AfieldR,
         template <typename> class rA2AfieldL, template <typename> class rA2AfieldR
         >
void trace(fMatrix<typename mf_Policies::ScalarComplexType> &into, const std::vector<A2AmesonField<mf_Policies,lA2AfieldL,lA2AfieldR> > &l, const std::vector<A2AmesonField<mf_Policies,rA2AfieldL,rA2AfieldR> > &r){
  //Distribute load over all nodes
  int lsize = l.size();
  int rsize = r.size();
  into.resize(lsize,rsize);

  const int work = lsize*rsize;
  if(work == 0) return; //robustness: the original divided by zero ('work/nodes' with nodes clamped to 0) on empty input

  int nodes = 1; for(int i=0;i<5;i++) nodes *= GJP.Nodes(i);

  bool do_work = true;
  if(nodes > work){
    nodes = work; if(UniqueID() >= work) do_work = false; //too many nodes, at least for this parallelization. Might want to consider parallelizing in a different way!
  }

  //BUGFIX: the original gave node 0 'base + remainder' items at offset 0 while node i used
  //offset i*base, so when work % nodes != 0 the range [base, base+rem) was computed by BOTH
  //node 0 and node 1 (double counted by the nodeSum below) and the tail [nodes*base, work)
  //was computed by nobody. Spread the remainder over the first 'rem' nodes with
  //non-overlapping, exhaustive offsets instead.
  const int base = work / nodes;
  const int rem  = work - base * nodes;
  const int me   = UniqueID();
  int node_work = 0, node_off = 0;
  if(do_work){
    node_work = base + (me < rem ? 1 : 0);
    node_off  = me * base + (me < rem ? me : rem);
  }

#ifndef MEMTEST_MODE
  if(do_work){
    for(int tt=node_off; tt<node_off + node_work; tt++){
      int pos = tt;
      int tsnk = pos % lsize; pos /= lsize; //sink time
      int tsrc = pos; //source time
      into(tsnk,tsrc) = trace(l[tsnk],r[tsrc]);
    }
  }
  into.nodeSum(); //give all nodes a copy
#endif
}
struct nodeDistributeCounter{
  //Hand out a storage-node index, cycling round-robin through 0..nodes-1 so that
  //successive meson fields are spread evenly over the machine.
  static int getNext(){
    static int cur = 0;
    static int nodes = -1;
    if(nodes == -1){
      //First call: total node count is the product over the 5 machine directions
      nodes = 1;
      for(int d=0;d<5;d++) nodes *= GJP.Nodes(d);
    }
    const int ret = cur;
    cur = (cur + 1) % nodes;
    return ret;
  }
};
//Delete all the data associated with this meson field apart from on node with UniqueID 'node'. The node index is saved so that the data can be later retrieved.
//NOTE: this is an MPI-collective operation; all nodes must call it with the same node_uniqueid.
//Pass node_uniqueid == -1 (the default pool behavior) to draw the destination from the round-robin counter.
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR>
void A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR>::nodeDistribute(int node_uniqueid){
  if(node_uniqueid == -1) node_uniqueid = nodeDistributeCounter::getNext(); //draw the next node index from the pool
  //Total node count is the product over the 5 machine directions
  int nodes = 1; for(int i=0;i<5;i++) nodes *= GJP.Nodes(i);
  if(node_uniqueid < 0 || node_uniqueid >= nodes) ERR.General("A2AmesonField","nodeDistribute","Invalid node rank %d\n", node_uniqueid);
#ifndef USE_MPI
  if(nodes > 1) ERR.General("A2AmesonField","nodeDistribute","Implementation requires MPI\n");
  //For one node, don't do anything
#else
  //if(!UniqueID()){ printf("nodeDistribute start with destination node UniqueID %d\n",node_uniqueid); fflush(stdout); }
  MPI_Barrier(MPI_COMM_WORLD);
  //MPI rank and CPS UniqueID may not be the same. Translate
  int my_rank;
  int ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
  if(ret != MPI_SUCCESS) ERR.General("A2AmesonField","nodeDistribute","Comm_rank failed\n");
  if(mf == NULL){ printf("Error nodeDistribute: Rank %d has null pointer!\n",my_rank); fflush(stdout); exit(-1); }
  //printf("UniqueID %d -> MPI rank %d\n",UniqueID(),my_rank); fflush(stdout);
  //Every rank contributes 0 except the storage node, which contributes its MPI rank;
  //the MAX-reduction therefore hands all ranks the storage node's MPI rank.
  //(Also correct when the storage node's MPI rank happens to be 0.)
  int rank_tmp = (UniqueID() == node_uniqueid ? my_rank : 0);
  ret = MPI_Allreduce(&rank_tmp,&node_mpi_rank, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); //node is now the MPI rank corresponding to UniqueID == _node
  if(ret != MPI_SUCCESS) ERR.General("A2AmesonField","nodeDistribute","Reduce failed\n");
  MPI_Barrier(MPI_COMM_WORLD);
  //printf("UniqueID %d (MPI rank %d) got storage rank %d\n",UniqueID(),my_rank,node_mpi_rank); fflush(stdout);
  //All nodes except the storage node release their copy of the data;
  //nodeGet() later reallocates and re-broadcasts it from node_mpi_rank.
  if(UniqueID() != node_uniqueid){
    //printf("UniqueID %d (MPI rank %d) free'd memory\n",UniqueID(),my_rank); fflush(stdout);
    free(mf); mf = NULL;
  }//else{ printf("UniqueID %d (MPI rank %d) is storage node, not freeing\n",UniqueID(),my_rank); fflush(stdout); }
  //if(!UniqueID()) printf("A2AmesonField::nodeDistribute %f MB stored on node %d (MPI rank %d)\n",(double)byte_size()/(1024.*1024.), node_uniqueid, node_mpi_rank);
  //if(my_rank == node_mpi_rank) printf("A2AmesonField::nodeDistribute I am node with MPI rank %d and I have UniqueID %d, my first elem remains %f\n",my_rank,UniqueID(),mf[0]);
#endif
}
#ifdef USE_MPI
//Trait mapping a C++ floating-point type to the corresponding MPI datatype.
//The primary template is deliberately left without a doit() so that using an
//unsupported type fails at compile time.
template<typename mf_Float>
struct getMPIdataType{
};
template<>
struct getMPIdataType<double>{
  static MPI_Datatype doit(){ return MPI_DOUBLE; }
};
template<>
struct getMPIdataType<float>{
  static MPI_Datatype doit(){ return MPI_FLOAT; }
};
#endif
//Get back the data. After the call, all nodes will have a complete copy.
//MPI-collective: every node must participate. No-op if the field is already on all nodes.
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR>
void A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR>::nodeGet(){
  typedef typename ScalarComplexType::value_type mf_Float;
  if(node_mpi_rank == -1) return; //already on all nodes
#ifndef USE_MPI
  int nodes = 1; for(int i=0;i<5;i++) nodes *= GJP.Nodes(i);
  if(nodes > 1) ERR.General("A2AmesonField","nodeGet","Implementation requires MPI\n");
#else
  int mpi_rank; //get this node's MPI rank
  int ret = MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
  if(ret != MPI_SUCCESS) ERR.General("A2AmesonField","nodeGet","Comm_rank failed\n");

  //Non-storage ranks freed their copy in nodeDistribute; reallocate a receive buffer
  if(mpi_rank != node_mpi_rank){
    mf = (ScalarComplexType*)malloc(byte_size());
    if(mf == NULL){ printf("rank %d failed to allocate memory!\n",mpi_rank); fflush(stdout); exit(-1); }
  }

  //Sanity check: the MPI datatype size must match the compiled floating-point size
  MPI_Datatype dtype = getMPIdataType<mf_Float>::doit();
  int dsize;
  MPI_Type_size(dtype,&dsize);
  if(sizeof(mf_Float) != (size_t)dsize){
    //BUGFIX: sizeof(...) is size_t; printing it with %d is undefined behavior. Cast explicitly.
    if(!UniqueID()){ printf("MPI size mismatch, want %d, got %d\n",(int)sizeof(mf_Float),dsize); fflush(stdout); }
    MPI_Barrier(MPI_COMM_WORLD);
    exit(-1);
  }

  MPI_Barrier(MPI_COMM_WORLD);
  //Broadcast as raw floats: each complex number is 2 floats.
  //NOTE(review): the count 2*fsize is passed as int — confirm it cannot overflow for very large fields.
  ret = MPI_Bcast((void*)mf, 2*fsize, dtype, node_mpi_rank, MPI_COMM_WORLD);
  if(ret != MPI_SUCCESS){
    //BUGFIX: removed the stray mf[0] variadic argument of the original; the format string has no
    //conversion for it, and passing a non-trivial complex type through '...' is undefined behavior.
    printf("rank %d Bcast fail\n",mpi_rank); fflush(stdout);
    MPI_Barrier(MPI_COMM_WORLD);
    exit(-1);
  }
  node_mpi_rank = -1; //now on all nodes
  MPI_Barrier(MPI_COMM_WORLD);
#endif
}
#endif
|
OpenMPClause.h | //===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines OpenMP AST classes for clauses.
/// There are clauses for executable directives, clauses for declarative
/// directives and clauses which can be used in both kinds of directives.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H
#define LLVM_CLANG_AST_OPENMPCLAUSE_H
#include "clang/AST/Decl.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TrailingObjects.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <utility>
namespace clang {
class ASTContext;
//===----------------------------------------------------------------------===//
// AST classes for clauses.
//===----------------------------------------------------------------------===//
/// This is a basic class for representing single OpenMP clause.
class OMPClause {
  /// Starting location of the clause (the clause keyword).
  SourceLocation StartLoc;

  /// Ending location of the clause.
  SourceLocation EndLoc;

  /// Kind of the clause.
  OpenMPClauseKind Kind;

protected:
  OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc)
      : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {}

public:
  /// Returns the starting location of the clause.
  SourceLocation getBeginLoc() const { return StartLoc; }

  /// Returns the ending location of the clause.
  SourceLocation getEndLoc() const { return EndLoc; }

  /// Sets the starting location of the clause.
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }

  /// Sets the ending location of the clause.
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }

  /// Returns kind of OpenMP clause (private, shared, reduction, etc.).
  OpenMPClauseKind getClauseKind() const { return Kind; }

  /// Returns true for clauses synthesized by the compiler, which carry no
  /// valid source location.
  bool isImplicit() const { return StartLoc.isInvalid(); }

  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;
  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();
  const_child_range children() const {
    auto Children = const_cast<OMPClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  /// Get the iterator range for the expressions used in the clauses. Used
  /// expressions include only the children that must be evaluated at the
  /// runtime before entering the construct.
  child_range used_children();
  const_child_range used_children() const {
    // BUGFIX: delegate to the non-const used_children() rather than children().
    // The latter returns *all* children, so the const overload disagreed with
    // the non-const one and returned unused expressions as "used".
    auto Children = const_cast<OMPClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *) { return true; }
};
/// Class that handles pre-initialization statement for some clauses, like
/// 'schedule', 'firstprivate' etc.
class OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Pre-initialization statement for the clause.
  Stmt *PreInit = nullptr;

  /// Region that captures the associated stmt.
  OpenMPDirectiveKind CaptureRegion = llvm::omp::OMPD_unknown;

protected:
  OMPClauseWithPreInit(const OMPClause *This) {
    assert(get(This) && "get is not tuned for pre-init.");
  }

  /// Set pre-initialization statement for the clause.
  ///
  /// \param S Pre-initialization statement.
  /// \param ThisRegion Directive region that captures the statement.
  void
  setPreInitStmt(Stmt *S,
                 OpenMPDirectiveKind ThisRegion = llvm::omp::OMPD_unknown) {
    PreInit = S;
    CaptureRegion = ThisRegion;
  }

public:
  /// Get pre-initialization statement for the clause.
  const Stmt *getPreInitStmt() const { return PreInit; }

  /// Get pre-initialization statement for the clause.
  Stmt *getPreInitStmt() { return PreInit; }

  /// Get capture region for the stmt in the clause.
  OpenMPDirectiveKind getCaptureRegion() const { return CaptureRegion; }

  static OMPClauseWithPreInit *get(OMPClause *C);
  static const OMPClauseWithPreInit *get(const OMPClause *C);
};
/// Class that handles post-update expression for some clauses, like
/// 'lastprivate', 'reduction' etc.
class OMPClauseWithPostUpdate : public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Post-update expression for the clause.
  Expr *PostUpdate = nullptr;

protected:
  OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) {
    assert(get(This) && "get is not tuned for post-update.");
  }

  /// Set post-update expression for the clause.
  /// (Fixed comment: the original said "pre-initialization statement",
  /// a copy-paste from OMPClauseWithPreInit.)
  void setPostUpdateExpr(Expr *S) { PostUpdate = S; }

public:
  /// Get post-update expression for the clause.
  const Expr *getPostUpdateExpr() const { return PostUpdate; }

  /// Get post-update expression for the clause.
  Expr *getPostUpdateExpr() { return PostUpdate; }

  static OMPClauseWithPostUpdate *get(OMPClause *C);
  static const OMPClauseWithPostUpdate *get(const OMPClause *C);
};
/// This structure contains most locations needed by an OMPVarListClause.
struct OMPVarListLocTy {
  /// Starting location of the clause (the clause keyword).
  SourceLocation StartLoc;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// Ending location of the clause.
  SourceLocation EndLoc;
  OMPVarListLocTy() = default;
  OMPVarListLocTy(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : StartLoc(StartLoc), LParenLoc(LParenLoc), EndLoc(EndLoc) {}
};
/// This represents clauses with the list of variables like 'private',
/// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the
/// '#pragma omp ...' directives.
///
/// CRTP base: \a T is the concrete clause class, which stores the variable
/// expressions as trailing objects.
template <class T> class OMPVarListClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of variables in the list.
  unsigned NumVars;

protected:
  /// Build a clause with \a N variables
  ///
  /// \param K Kind of the clause.
  /// \param StartLoc Starting location of the clause (the clause keyword).
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N)
      : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {}

  /// Fetches list of variables associated with this clause.
  /// The storage lives in the derived class's trailing objects, hence the
  /// CRTP downcast.
  MutableArrayRef<Expr *> getVarRefs() {
    return MutableArrayRef<Expr *>(
        static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars);
  }

  /// Sets the list of variables for this clause.
  void setVarRefs(ArrayRef<Expr *> VL) {
    assert(VL.size() == NumVars &&
           "Number of variables is not the same as the preallocated buffer");
    std::copy(VL.begin(), VL.end(),
              static_cast<T *>(this)->template getTrailingObjects<Expr *>());
  }

public:
  using varlist_iterator = MutableArrayRef<Expr *>::iterator;
  using varlist_const_iterator = ArrayRef<const Expr *>::iterator;
  using varlist_range = llvm::iterator_range<varlist_iterator>;
  using varlist_const_range = llvm::iterator_range<varlist_const_iterator>;

  /// Returns the number of variables in the clause.
  unsigned varlist_size() const { return NumVars; }
  /// Returns true if the clause references no variables.
  bool varlist_empty() const { return NumVars == 0; }

  varlist_range varlists() {
    return varlist_range(varlist_begin(), varlist_end());
  }
  varlist_const_range varlists() const {
    return varlist_const_range(varlist_begin(), varlist_end());
  }

  varlist_iterator varlist_begin() { return getVarRefs().begin(); }
  varlist_iterator varlist_end() { return getVarRefs().end(); }
  varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); }
  varlist_const_iterator varlist_end() const { return getVarRefs().end(); }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Fetches list of all variables in the clause.
  ArrayRef<const Expr *> getVarRefs() const {
    return llvm::makeArrayRef(
        static_cast<const T *>(this)->template getTrailingObjects<Expr *>(),
        NumVars);
  }
};
/// Represents the 'allocator' clause of a '#pragma omp ...' directive.
///
/// \code
/// #pragma omp allocate(a) allocator(omp_default_mem_alloc)
/// \endcode
/// Here the '#pragma omp allocate' directive carries an 'allocator' clause
/// naming the allocator 'omp_default_mem_alloc'.
class OMPAllocatorClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Expression with the allocator.
  Stmt *Allocator = nullptr;

  /// Set allocator.
  void setAllocator(Expr *A) { Allocator = A; }

public:
  /// Build an 'allocator' clause wrapping allocator expression \a A.
  ///
  /// \param A Allocator expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPAllocatorClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc)
      : OMPClause(OMPC_allocator, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Allocator(A) {}

  /// Build an empty clause.
  OMPAllocatorClause()
      : OMPClause(OMPC_allocator, SourceLocation(), SourceLocation()) {}

  /// Returns allocator.
  Expr *getAllocator() const {
    return cast_or_null<Expr>(Allocator);
  }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const {
    return LParenLoc;
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) {
    LParenLoc = Loc;
  }

  child_range children() {
    return child_range(&Allocator, &Allocator + 1);
  }

  const_child_range children() const {
    return const_child_range(&Allocator, &Allocator + 1);
  }

  /// Empty range: no children are reported as used.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_allocator;
  }
};
/// This represents clause 'allocate' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a) allocate(omp_default_mem_alloc :a)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// and clause 'allocate' for the variable 'a'.
///
/// The variable list is stored as trailing objects; instances must be created
/// through the static Create/CreateEmpty factories so the trailing storage is
/// allocated correctly.
class OMPAllocateClause final
    : public OMPVarListClause<OMPAllocateClause>,
      private llvm::TrailingObjects<OMPAllocateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Allocator specified in the clause, or 'nullptr' if the default one is
  /// used.
  Expr *Allocator = nullptr;

  /// Position of the ':' delimiter in the clause.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Allocator Allocator expression.
  /// \param ColonLoc Location of ':' delimiter.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPAllocateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    Expr *Allocator, SourceLocation ColonLoc,
                    SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPAllocateClause>(OMPC_allocate, StartLoc, LParenLoc,
                                            EndLoc, N),
        Allocator(Allocator), ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPAllocateClause(unsigned N)
      : OMPVarListClause<OMPAllocateClause>(OMPC_allocate, SourceLocation(),
                                            SourceLocation(), SourceLocation(),
                                            N) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the allocator expression.
  void setAllocator(Expr *A) { Allocator = A; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Allocator Allocator expression.
  /// \param ColonLoc Location of ':' delimiter.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPAllocateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation LParenLoc, Expr *Allocator,
                                   SourceLocation ColonLoc,
                                   SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Returns the allocator expression or nullptr, if no allocator is specified.
  Expr *getAllocator() const { return Allocator; }

  /// Returns the location of the ':' delimiter.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPAllocateClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPAllocateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_allocate;
  }
};
/// This represents 'if' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel if(parallel:a > 5)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'if' clause with
/// condition 'a > 5' and directive name modifier 'parallel'.
class OMPIfClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'if' clause.
  Stmt *Condition = nullptr;

  /// Location of ':' (if any).
  SourceLocation ColonLoc;

  /// Directive name modifier for the clause.
  OpenMPDirectiveKind NameModifier = llvm::omp::OMPD_unknown;

  /// Name modifier location.
  SourceLocation NameModifierLoc;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

  /// Set directive name modifier for the clause.
  void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; }

  /// Set location of directive name modifier for the clause.
  void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; }

  /// Set location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Build 'if' clause with condition \a Cond.
  ///
  /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause.
  /// \param Cond Condition of the clause.
  /// \param HelperCond Helper condition for the clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param NameModifierLoc Location of directive name modifier.
  /// \param ColonLoc [OpenMP 4.1] Location of ':'.
  /// \param EndLoc Ending location of the clause.
  OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond,
              OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
              SourceLocation LParenLoc, SourceLocation NameModifierLoc,
              SourceLocation ColonLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_if, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Condition(Cond), ColonLoc(ColonLoc),
        NameModifier(NameModifier), NameModifierLoc(NameModifierLoc) {
    setPreInitStmt(HelperCond, CaptureRegion);
  }

  /// Build an empty clause.
  OMPIfClause()
      : OMPClause(OMPC_if, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }

  /// Return directive name modifier associated with the clause.
  OpenMPDirectiveKind getNameModifier() const { return NameModifier; }

  /// Return the location of directive name modifier.
  SourceLocation getNameModifierLoc() const { return NameModifierLoc; }

  child_range children() { return child_range(&Condition, &Condition + 1); }

  const_child_range children() const {
    return const_child_range(&Condition, &Condition + 1);
  }

  /// Defined out-of-line (unlike most clauses, the used-children set here
  /// depends on the directive and is not simply empty).
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPIfClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_if;
  }
};
/// Represents the 'final' clause of a '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task final(a > 5)
/// \endcode
/// Here the '#pragma omp task' directive carries a 'final' clause with the
/// condition 'a > 5'.
class OMPFinalClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'final' clause.
  Stmt *Condition = nullptr;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

public:
  /// Build a 'final' clause with condition \a Cond.
  ///
  /// \param Cond Condition of the clause.
  /// \param HelperCond Helper condition for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPFinalClause(Expr *Cond, Stmt *HelperCond,
                 OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                 SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_final, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Condition(Cond) {
    setPreInitStmt(HelperCond, CaptureRegion);
  }

  /// Build an empty clause.
  OMPFinalClause()
      : OMPClause(OMPC_final, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Returns condition.
  Expr *getCondition() const {
    return cast_or_null<Expr>(Condition);
  }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const {
    return LParenLoc;
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) {
    LParenLoc = Loc;
  }

  child_range children() {
    return child_range(&Condition, &Condition + 1);
  }

  const_child_range children() const {
    return const_child_range(&Condition, &Condition + 1);
  }

  /// Defined out-of-line; the const overload forwards to it.
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPFinalClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_final;
  }
};
/// Represents the 'num_threads' clause of a '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel num_threads(6)
/// \endcode
/// Here the '#pragma omp parallel' directive carries a 'num_threads' clause
/// requesting six threads.
class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Expression giving the requested number of threads.
  Stmt *NumThreads = nullptr;

  /// Set the number-of-threads expression.
  void setNumThreads(Expr *NThreads) { NumThreads = NThreads; }

public:
  /// Build a 'num_threads' clause with expression \a NumThreads.
  ///
  /// \param NumThreads Number of threads for the construct.
  /// \param HelperNumThreads Helper number-of-threads expression for the
  /// construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads,
                      OpenMPDirectiveKind CaptureRegion,
                      SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation EndLoc)
      : OMPClause(OMPC_num_threads, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc),
        NumThreads(NumThreads) {
    setPreInitStmt(HelperNumThreads, CaptureRegion);
  }

  /// Build an empty clause.
  OMPNumThreadsClause()
      : OMPClause(OMPC_num_threads, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Returns number of threads.
  Expr *getNumThreads() const {
    return cast_or_null<Expr>(NumThreads);
  }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const {
    return LParenLoc;
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) {
    LParenLoc = Loc;
  }

  child_range children() {
    return child_range(&NumThreads, &NumThreads + 1);
  }

  const_child_range children() const {
    return const_child_range(&NumThreads, &NumThreads + 1);
  }

  /// Empty range: no children are reported as used.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_threads;
  }
};
/// Represents the 'safelen' clause of a '#pragma omp ...' directive.
///
/// \code
/// #pragma omp simd safelen(4)
/// \endcode
/// Here the '#pragma omp simd' directive carries a 'safelen' clause with the
/// single expression '4'.
/// When 'safelen' is given, no two iterations executed concurrently with SIMD
/// instructions may be further apart in the logical iteration space than its
/// value; the parameter must be a constant positive integer expression.
class OMPSafelenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Safe iteration space distance.
  Stmt *Safelen = nullptr;

  /// Set safelen.
  void setSafelen(Expr *Len) { Safelen = Len; }

public:
  /// Build a 'safelen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Safelen(Len) {}

  /// Build an empty clause.
  explicit OMPSafelenClause()
      : OMPClause(OMPC_safelen, SourceLocation(), SourceLocation()) {}

  /// Return safe iteration space distance.
  Expr *getSafelen() const {
    return cast_or_null<Expr>(Safelen);
  }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const {
    return LParenLoc;
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) {
    LParenLoc = Loc;
  }

  child_range children() {
    return child_range(&Safelen, &Safelen + 1);
  }

  const_child_range children() const {
    return const_child_range(&Safelen, &Safelen + 1);
  }

  /// Empty range: no children are reported as used.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_safelen;
  }
};
/// Represents the 'simdlen' clause of a '#pragma omp ...' directive.
///
/// \code
/// #pragma omp simd simdlen(4)
/// \endcode
/// Here the '#pragma omp simd' directive carries a 'simdlen' clause with the
/// single expression '4'.
/// When 'simdlen' is given, it specifies the preferred number of iterations to
/// execute concurrently; the parameter must be a constant positive integer
/// expression.
class OMPSimdlenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Preferred number of concurrent iterations.
  /// (Original comment said "Safe iteration space distance", a copy-paste
  /// from OMPSafelenClause.)
  Stmt *Simdlen = nullptr;

  /// Set simdlen.
  void setSimdlen(Expr *Len) { Simdlen = Len; }

public:
  /// Build a 'simdlen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSimdlenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_simdlen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Simdlen(Len) {}

  /// Build an empty clause.
  explicit OMPSimdlenClause()
      : OMPClause(OMPC_simdlen, SourceLocation(), SourceLocation()) {}

  /// Return the preferred-iteration-count expression.
  Expr *getSimdlen() const {
    return cast_or_null<Expr>(Simdlen);
  }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const {
    return LParenLoc;
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) {
    LParenLoc = Loc;
  }

  child_range children() {
    return child_range(&Simdlen, &Simdlen + 1);
  }

  const_child_range children() const {
    return const_child_range(&Simdlen, &Simdlen + 1);
  }

  /// Empty range: no children are reported as used.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_simdlen;
  }
};
/// This represents 'collapse' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd collapse(3)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'collapse'
/// with single expression '3'.
/// The parameter must be a constant positive integer expression, it specifies
/// the number of nested loops that should be collapsed into a single iteration
/// space.
class OMPCollapseClause : public OMPClause {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// Number of for-loops.
  Stmt *NumForLoops = nullptr;
  /// Set the number of associated for-loops (used by the deserializer).
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }
public:
  /// Build 'collapse' clause.
  ///
  /// \param Num Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPCollapseClause(Expr *Num, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumForLoops(Num) {}
  /// Build an empty clause (for deserialization).
  explicit OMPCollapseClause()
      : OMPClause(OMPC_collapse, SourceLocation(), SourceLocation()) {}
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Return the number of associated for-loops.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }
  // The loop-count expression is the clause's only child.
  child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }
  const_child_range children() const {
    return const_child_range(&NumForLoops, &NumForLoops + 1);
  }
  // No children are used at runtime by the associated directive.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// LLVM-style RTTI support.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_collapse;
  }
};
/// This represents 'default' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel default(shared)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'default'
/// clause with kind 'shared'.
class OMPDefaultClause : public OMPClause {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// A kind of the 'default' clause.
  llvm::omp::DefaultKind Kind = llvm::omp::OMP_DEFAULT_unknown;
  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;
  /// Set kind of the clauses.
  ///
  /// \param K Argument of clause.
  void setDefaultKind(llvm::omp::DefaultKind K) { Kind = K; }
  /// Set argument location.
  ///
  /// \param KLoc Argument location.
  void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }
public:
  /// Build 'default' clause with argument \a A ('none' or 'shared').
  ///
  /// \param A Argument of the clause ('none' or 'shared').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDefaultClause(llvm::omp::DefaultKind A, SourceLocation ALoc,
                   SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Kind(A), KindKwLoc(ALoc) {}
  /// Build an empty clause (for deserialization).
  OMPDefaultClause()
      : OMPClause(OMPC_default, SourceLocation(), SourceLocation()) {}
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Returns kind of the clause.
  llvm::omp::DefaultKind getDefaultKind() const { return Kind; }
  /// Returns location of clause kind.
  SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; }
  // The clause holds only an enum argument, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// LLVM-style RTTI support.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_default;
  }
};
/// This represents 'proc_bind' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel proc_bind(master)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'proc_bind'
/// clause with kind 'master'.
class OMPProcBindClause : public OMPClause {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// A kind of the 'proc_bind' clause.
  llvm::omp::ProcBindKind Kind = llvm::omp::OMP_PROC_BIND_unknown;
  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;
  /// Set kind of the clause.
  ///
  /// \param K Kind of clause.
  void setProcBindKind(llvm::omp::ProcBindKind K) { Kind = K; }
  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }
public:
  /// Build 'proc_bind' clause with argument \a A ('master', 'close' or
  /// 'spread').
  ///
  /// \param A Argument of the clause ('master', 'close' or 'spread').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPProcBindClause(llvm::omp::ProcBindKind A, SourceLocation ALoc,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Kind(A), KindKwLoc(ALoc) {}
  /// Build an empty clause (for deserialization).
  OMPProcBindClause()
      : OMPClause(OMPC_proc_bind, SourceLocation(), SourceLocation()) {}
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Returns kind of the clause.
  llvm::omp::ProcBindKind getProcBindKind() const { return Kind; }
  /// Returns location of clause kind.
  SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; }
  // The clause holds only an enum argument, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// LLVM-style RTTI support.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_proc_bind;
  }
};
/// This represents 'unified_address' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires unified_address
/// \endcode
/// In this example directive '#pragma omp requires' has 'unified_address'
/// clause.
class OMPUnifiedAddressClause final : public OMPClause {
public:
  friend class OMPClauseReader;
  /// Build 'unified_address' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_unified_address, StartLoc, EndLoc) {}
  /// Build an empty clause (for deserialization).
  OMPUnifiedAddressClause()
      : OMPClause(OMPC_unified_address, SourceLocation(), SourceLocation()) {}
  // The clause takes no arguments, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// LLVM-style RTTI support.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_unified_address;
  }
};
/// This represents 'unified_shared_memory' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires unified_shared_memory
/// \endcode
/// In this example directive '#pragma omp requires' has 'unified_shared_memory'
/// clause.
class OMPUnifiedSharedMemoryClause final : public OMPClause {
public:
  friend class OMPClauseReader;
  /// Build 'unified_shared_memory' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_unified_shared_memory, StartLoc, EndLoc) {}
  /// Build an empty clause (for deserialization).
  OMPUnifiedSharedMemoryClause()
      : OMPClause(OMPC_unified_shared_memory, SourceLocation(), SourceLocation()) {}
  // The clause takes no arguments, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// LLVM-style RTTI support.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_unified_shared_memory;
  }
};
/// This represents 'reverse_offload' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires reverse_offload
/// \endcode
/// In this example directive '#pragma omp requires' has 'reverse_offload'
/// clause.
class OMPReverseOffloadClause final : public OMPClause {
public:
  friend class OMPClauseReader;
  /// Build 'reverse_offload' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_reverse_offload, StartLoc, EndLoc) {}
  /// Build an empty clause (for deserialization).
  OMPReverseOffloadClause()
      : OMPClause(OMPC_reverse_offload, SourceLocation(), SourceLocation()) {}
  // The clause takes no arguments, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// LLVM-style RTTI support.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_reverse_offload;
  }
};
/// This represents 'dynamic_allocators' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires dynamic_allocators
/// \endcode
/// In this example directive '#pragma omp requires' has 'dynamic_allocators'
/// clause.
class OMPDynamicAllocatorsClause final : public OMPClause {
public:
  friend class OMPClauseReader;
  /// Build 'dynamic_allocators' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_dynamic_allocators, StartLoc, EndLoc) {}
  /// Build an empty clause (for deserialization).
  OMPDynamicAllocatorsClause()
      : OMPClause(OMPC_dynamic_allocators, SourceLocation(), SourceLocation()) {
  }
  // The clause takes no arguments, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// LLVM-style RTTI support.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_dynamic_allocators;
  }
};
/// This represents 'atomic_default_mem_order' clause in the '#pragma omp
/// requires' directive.
///
/// \code
/// #pragma omp requires atomic_default_mem_order(seq_cst)
/// \endcode
/// In this example directive '#pragma omp requires' has simple
/// atomic_default_mem_order' clause with kind 'seq_cst'.
class OMPAtomicDefaultMemOrderClause final : public OMPClause {
  friend class OMPClauseReader;
  /// Location of '('
  SourceLocation LParenLoc;
  /// A kind of the 'atomic_default_mem_order' clause.
  OpenMPAtomicDefaultMemOrderClauseKind Kind =
      OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown;
  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;
  /// Set kind of the clause.
  ///
  /// \param K Kind of clause.
  void setAtomicDefaultMemOrderKind(OpenMPAtomicDefaultMemOrderClauseKind K) {
    Kind = K;
  }
  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setAtomicDefaultMemOrderKindKwLoc(SourceLocation KLoc) {
    KindKwLoc = KLoc;
  }
public:
  /// Build 'atomic_default_mem_order' clause with argument \a A ('seq_cst',
  /// 'acq_rel' or 'relaxed').
  ///
  /// \param A Argument of the clause ('seq_cst', 'acq_rel' or 'relaxed').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPAtomicDefaultMemOrderClause(OpenMPAtomicDefaultMemOrderClauseKind A,
                                 SourceLocation ALoc, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc)
      : OMPClause(OMPC_atomic_default_mem_order, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}
  /// Build an empty clause (for deserialization).
  OMPAtomicDefaultMemOrderClause()
      : OMPClause(OMPC_atomic_default_mem_order, SourceLocation(),
                  SourceLocation()) {}
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Returns kind of the clause.
  OpenMPAtomicDefaultMemOrderClauseKind getAtomicDefaultMemOrderKind() const {
    return Kind;
  }
  /// Returns location of clause kind.
  SourceLocation getAtomicDefaultMemOrderKindKwLoc() const { return KindKwLoc; }
  // The clause holds only an enum argument, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// LLVM-style RTTI support.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_atomic_default_mem_order;
  }
};
/// This represents 'schedule' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp for' has 'schedule' clause with
/// arguments 'static' and '3'.
class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// A kind of the 'schedule' clause.
  OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown;
  /// Modifiers for 'schedule' clause (at most two are allowed).
  enum {FIRST, SECOND, NUM_MODIFIERS};
  OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS];
  /// Locations of modifiers.
  SourceLocation ModifiersLoc[NUM_MODIFIERS];
  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;
  /// Location of ',' (if any).
  SourceLocation CommaLoc;
  /// Chunk size.
  Expr *ChunkSize = nullptr;
  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; }
  /// Set the first schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[FIRST] = M;
  }
  /// Set the second schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[SECOND] = M;
  }
  /// Set location of the first schedule modifier.
  void setFirstScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[FIRST] = Loc;
  }
  /// Set location of the second schedule modifier.
  void setSecondScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[SECOND] = Loc;
  }
  /// Set the next unset schedule modifier (fills FIRST, then SECOND).
  ///
  /// \param M Schedule modifier.
  /// NOTE(review): the name carries a historical typo ("Modifer"); it is kept
  /// because OMPClauseReader and other callers reference it by this spelling.
  void setScheduleModifer(OpenMPScheduleClauseModifier M) {
    if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown)
      Modifiers[FIRST] = M;
    else {
      assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown);
      Modifiers[SECOND] = M;
    }
  }
  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }
  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }
public:
  /// Build 'schedule' clause with schedule kind \a Kind and chunk size
  /// expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Schedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  /// \param M1 The first modifier applied to 'schedule' clause.
  /// \param M1Loc Location of the first modifier
  /// \param M2 The second modifier applied to 'schedule' clause.
  /// \param M2Loc Location of the second modifier
  OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation KLoc, SourceLocation CommaLoc,
                    SourceLocation EndLoc, OpenMPScheduleClauseKind Kind,
                    Expr *ChunkSize, Stmt *HelperChunkSize,
                    OpenMPScheduleClauseModifier M1, SourceLocation M1Loc,
                    OpenMPScheduleClauseModifier M2, SourceLocation M2Loc)
      : OMPClause(OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc),
        ChunkSize(ChunkSize) {
    setPreInitStmt(HelperChunkSize);
    Modifiers[FIRST] = M1;
    Modifiers[SECOND] = M2;
    ModifiersLoc[FIRST] = M1Loc;
    ModifiersLoc[SECOND] = M2Loc;
  }
  /// Build an empty clause (for deserialization).
  explicit OMPScheduleClause()
      : OMPClause(OMPC_schedule, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {
    Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown;
    Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown;
  }
  /// Get kind of the clause.
  OpenMPScheduleClauseKind getScheduleKind() const { return Kind; }
  /// Get the first modifier of the clause.
  OpenMPScheduleClauseModifier getFirstScheduleModifier() const {
    return Modifiers[FIRST];
  }
  /// Get the second modifier of the clause.
  OpenMPScheduleClauseModifier getSecondScheduleModifier() const {
    return Modifiers[SECOND];
  }
  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }
  /// Get kind location.
  SourceLocation getScheduleKindLoc() { return KindLoc; }
  /// Get the first modifier location.
  SourceLocation getFirstScheduleModifierLoc() const {
    return ModifiersLoc[FIRST];
  }
  /// Get the second modifier location.
  SourceLocation getSecondScheduleModifierLoc() const {
    return ModifiersLoc[SECOND];
  }
  /// Get location of ','.
  SourceLocation getCommaLoc() { return CommaLoc; }
  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }
  /// Get chunk size.
  const Expr *getChunkSize() const { return ChunkSize; }
  // The optional chunk-size expression is the clause's only child; the cast
  // reinterprets the Expr* slot as a Stmt* slot for the generic child range.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }
  const_child_range children() const {
    auto Children = const_cast<OMPScheduleClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }
  // No children are used at runtime by the associated directive.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// LLVM-style RTTI support.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_schedule;
  }
};
/// This represents 'ordered' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for ordered (2)
/// \endcode
/// In this example directive '#pragma omp for' has 'ordered' clause with
/// parameter 2.
class OMPOrderedClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPOrderedClause, Expr *> {
  friend class OMPClauseReader;
  friend TrailingObjects;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// Number of for-loops.
  Stmt *NumForLoops = nullptr;
  /// Real number of loops.
  unsigned NumberOfLoops = 0;
  /// Build 'ordered' clause.
  ///
  /// \param Num Expression, possibly associated with this clause.
  /// \param NumLoops Number of loops, associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPOrderedClause(Expr *Num, unsigned NumLoops, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumForLoops(Num), NumberOfLoops(NumLoops) {}
  /// Build an empty clause (for deserialization).
  explicit OMPOrderedClause(unsigned NumLoops)
      : OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()),
        NumberOfLoops(NumLoops) {}
  /// Set the number of associated for-loops.
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }
public:
  /// Build 'ordered' clause.
  ///
  /// Constructors are private; the TrailingObjects storage (per-loop
  /// iteration-count and loop-counter expressions accessed via the
  /// setLoopNumIterations/setLoopCounter family below) requires allocation
  /// through these factory functions.
  ///
  /// \param Num Expression, possibly associated with this clause.
  /// \param NumLoops Number of loops, associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  static OMPOrderedClause *Create(const ASTContext &C, Expr *Num,
                                  unsigned NumLoops, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
  /// Build an empty clause.
  static OMPOrderedClause* CreateEmpty(const ASTContext &C, unsigned NumLoops);
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Return the number of associated for-loops.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }
  /// Set number of iterations for the specified loop.
  void setLoopNumIterations(unsigned NumLoop, Expr *NumIterations);
  /// Get number of iterations for all the loops.
  ArrayRef<Expr *> getLoopNumIterations() const;
  /// Set loop counter for the specified loop.
  void setLoopCounter(unsigned NumLoop, Expr *Counter);
  /// Get loops counter for the specified loop.
  Expr *getLoopCounter(unsigned NumLoop);
  const Expr *getLoopCounter(unsigned NumLoop) const;
  // The optional loop-count expression is the clause's only visited child.
  child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }
  const_child_range children() const {
    return const_child_range(&NumForLoops, &NumForLoops + 1);
  }
  // No children are used at runtime by the associated directive.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// LLVM-style RTTI support.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_ordered;
  }
};
/// This represents 'nowait' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for nowait
/// \endcode
/// In this example directive '#pragma omp for' has 'nowait' clause.
class OMPNowaitClause : public OMPClause {
public:
  /// Build 'nowait' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_nowait, StartLoc, EndLoc) {}
  /// Build an empty clause (for deserialization).
  OMPNowaitClause()
      : OMPClause(OMPC_nowait, SourceLocation(), SourceLocation()) {}
  // The clause takes no arguments, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// LLVM-style RTTI support.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_nowait;
  }
};
/// This represents 'untied' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task untied
/// \endcode
/// In this example directive '#pragma omp task' has 'untied' clause.
class OMPUntiedClause : public OMPClause {
public:
  /// Build 'untied' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_untied, StartLoc, EndLoc) {}
  /// Build an empty clause (for deserialization).
  OMPUntiedClause()
      : OMPClause(OMPC_untied, SourceLocation(), SourceLocation()) {}
  // The clause takes no arguments, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// LLVM-style RTTI support.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_untied;
  }
};
/// This represents 'mergeable' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task mergeable
/// \endcode
/// In this example directive '#pragma omp task' has 'mergeable' clause.
class OMPMergeableClause : public OMPClause {
public:
  /// Build 'mergeable' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_mergeable, StartLoc, EndLoc) {}
  /// Build an empty clause (for deserialization).
  OMPMergeableClause()
      : OMPClause(OMPC_mergeable, SourceLocation(), SourceLocation()) {}
  // The clause takes no arguments, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// LLVM-style RTTI support.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_mergeable;
  }
};
/// This represents 'read' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic read
/// \endcode
/// In this example directive '#pragma omp atomic' has 'read' clause.
class OMPReadClause : public OMPClause {
public:
  /// Build 'read' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_read, StartLoc, EndLoc) {}
  /// Build an empty clause (for deserialization).
  OMPReadClause() : OMPClause(OMPC_read, SourceLocation(), SourceLocation()) {}
  // The clause takes no arguments, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// LLVM-style RTTI support.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_read;
  }
};
/// This represents 'write' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic write
/// \endcode
/// In this example directive '#pragma omp atomic' has 'write' clause.
class OMPWriteClause : public OMPClause {
public:
  /// Build 'write' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_write, StartLoc, EndLoc) {}
  /// Build an empty clause (for deserialization).
  OMPWriteClause()
      : OMPClause(OMPC_write, SourceLocation(), SourceLocation()) {}
  // The clause takes no arguments, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// LLVM-style RTTI support.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_write;
  }
};
/// This represents 'update' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic update
/// \endcode
/// In this example directive '#pragma omp atomic' has 'update' clause.
class OMPUpdateClause : public OMPClause {
public:
  /// Build 'update' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_update, StartLoc, EndLoc) {}
  /// Build an empty clause (for deserialization).
  OMPUpdateClause()
      : OMPClause(OMPC_update, SourceLocation(), SourceLocation()) {}
  // The clause takes no arguments, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// LLVM-style RTTI support.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_update;
  }
};
/// This represents 'capture' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has 'capture' clause.
class OMPCaptureClause : public OMPClause {
public:
  /// Build 'capture' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_capture, StartLoc, EndLoc) {}
  /// Build an empty clause (for deserialization).
  OMPCaptureClause()
      : OMPClause(OMPC_capture, SourceLocation(), SourceLocation()) {}
  // The clause takes no arguments, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// LLVM-style RTTI support.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_capture;
  }
};
/// This represents the 'seq_cst' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic seq_cst
/// \endcode
/// In this example directive '#pragma omp atomic' has 'seq_cst' clause.
class OMPSeqCstClause : public OMPClause {
public:
  /// Build a 'seq_cst' clause spanning [StartLoc, EndLoc].
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_seq_cst, StartLoc, EndLoc) {}

  /// Build a 'seq_cst' clause with no source locations.
  OMPSeqCstClause()
      : OMPClause(OMPC_seq_cst, SourceLocation(), SourceLocation()) {}

  /// The clause owns no sub-expressions, so every child range is empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// Supports LLVM-style RTTI (isa/cast/dyn_cast) on OMPClause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_seq_cst;
  }
};
/// This represents the 'acq_rel' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush acq_rel
/// \endcode
/// In this example directive '#pragma omp flush' has 'acq_rel' clause.
class OMPAcqRelClause final : public OMPClause {
public:
  /// Build an 'acq_rel' clause spanning [StartLoc, EndLoc].
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_acq_rel, StartLoc, EndLoc) {}

  /// Build an 'acq_rel' clause with no source locations.
  OMPAcqRelClause()
      : OMPClause(OMPC_acq_rel, SourceLocation(), SourceLocation()) {}

  /// The clause owns no sub-expressions, so every child range is empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// Supports LLVM-style RTTI (isa/cast/dyn_cast) on OMPClause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_acq_rel;
  }
};
/// This represents the 'acquire' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush acquire
/// \endcode
/// In this example directive '#pragma omp flush' has 'acquire' clause.
class OMPAcquireClause final : public OMPClause {
public:
  /// Build an 'acquire' clause spanning [StartLoc, EndLoc].
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_acquire, StartLoc, EndLoc) {}

  /// Build an 'acquire' clause with no source locations.
  OMPAcquireClause()
      : OMPClause(OMPC_acquire, SourceLocation(), SourceLocation()) {}

  /// The clause owns no sub-expressions, so every child range is empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// Supports LLVM-style RTTI (isa/cast/dyn_cast) on OMPClause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_acquire;
  }
};
/// This represents the 'release' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush release
/// \endcode
/// In this example directive '#pragma omp flush' has 'release' clause.
class OMPReleaseClause final : public OMPClause {
public:
  /// Build a 'release' clause spanning [StartLoc, EndLoc].
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_release, StartLoc, EndLoc) {}

  /// Build a 'release' clause with no source locations.
  OMPReleaseClause()
      : OMPClause(OMPC_release, SourceLocation(), SourceLocation()) {}

  /// The clause owns no sub-expressions, so every child range is empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// Supports LLVM-style RTTI (isa/cast/dyn_cast) on OMPClause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_release;
  }
};
/// This represents the 'relaxed' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic relaxed
/// \endcode
/// In this example directive '#pragma omp atomic' has 'relaxed' clause.
class OMPRelaxedClause final : public OMPClause {
public:
  /// Build a 'relaxed' clause spanning [StartLoc, EndLoc].
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_relaxed, StartLoc, EndLoc) {}

  /// Build a 'relaxed' clause with no source locations.
  OMPRelaxedClause()
      : OMPClause(OMPC_relaxed, SourceLocation(), SourceLocation()) {}

  /// The clause owns no sub-expressions, so every child range is empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// Supports LLVM-style RTTI (isa/cast/dyn_cast) on OMPClause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_relaxed;
  }
};
/// This represents clause 'private' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// with the variables 'a' and 'b'.
class OMPPrivateClause final
    : public OMPVarListClause<OMPPrivateClause>,
      private llvm::TrailingObjects<OMPPrivateClause, Expr *> {
  // Trailing storage layout: the variable list managed by OMPVarListClause
  // is immediately followed by a second array of the same length holding the
  // private copies (getPrivateCopies() starts at varlist_end()).
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPPrivateClause>(OMPC_private, StartLoc, LParenLoc,
                                           EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPPrivateClause(unsigned N)
      : OMPVarListClause<OMPPrivateClause>(OMPC_private, SourceLocation(),
                                           SourceLocation(), SourceLocation(),
                                           N) {}

  /// Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for
  /// new private variables. The array lives in trailing storage directly
  /// after the variable list.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PrivateVL List of references to private copies with initializers.
  static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  ArrayRef<Expr *> PrivateVL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  /// Range over the helper expressions for the private copies.
  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }
  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  /// The children are the references to the original variables.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPPrivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  /// Empty range: no children are reported as used.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// Supports LLVM-style RTTI (isa/cast/dyn_cast) on OMPClause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_private;
  }
};
/// This represents clause 'firstprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel firstprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
class OMPFirstprivateClause final
    : public OMPVarListClause<OMPFirstprivateClause>,
      public OMPClauseWithPreInit,
      private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> {
  // Trailing storage layout: the variable list managed by OMPVarListClause
  // is followed by two more arrays of the same length — the private copies
  // (starting at varlist_end()) and the initializer variables (starting at
  // getPrivateCopies().end()).
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFirstprivateClause>(OMPC_firstprivate, StartLoc,
                                                LParenLoc, EndLoc, N),
        OMPClauseWithPreInit(this) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPFirstprivateClause(unsigned N)
      : OMPVarListClause<OMPFirstprivateClause>(
            OMPC_firstprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPreInit(this) {}

  /// Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for
  /// new private variables. Stored directly after the variable list.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new
  /// private variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new
  /// private variables. Stored directly after the private copies.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the original variables.
  /// \param PrivateVL List of references to private copies with initializers.
  /// \param InitVL List of references to auto generated variables used for
  /// initialization of a single array element. Used if firstprivate variable is
  /// of array type.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  static OMPFirstprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
         ArrayRef<Expr *> InitVL, Stmt *PreInit);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  /// Range over the helper expressions for the private copies.
  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }
  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  /// Range over the initializer helper variables.
  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  /// The children are the references to the original variables.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPFirstprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  /// All variables in the clause are reported as used children.
  child_range used_children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range used_children() const {
    auto Children = const_cast<OMPFirstprivateClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  /// Supports LLVM-style RTTI (isa/cast/dyn_cast) on OMPClause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_firstprivate;
  }
};
/// This represents clause 'lastprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd lastprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'lastprivate'
/// with the variables 'a' and 'b'.
class OMPLastprivateClause final
    : public OMPVarListClause<OMPLastprivateClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
  // There are 4 additional tail-allocated arrays at the end of the class,
  // each varlist_size() elements long and laid out consecutively after the
  // variable list:
  // 1. Contains list of pseudo variables with the default initialization for
  //    each non-firstprivate variables. Used in codegen for initialization of
  //    lastprivate copies.
  // 2. List of helper expressions for proper generation of assignment operation
  //    required for lastprivate clause. This list represents private variables
  //    (for arrays, single array element).
  // 3. List of helper expressions for proper generation of assignment operation
  //    required for lastprivate clause. This list represents original variables
  //    (for arrays, single array element).
  // 4. List of helper expressions that represents assignment operation:
  //    \code
  //    DstExprs = SrcExprs;
  //    \endcode
  //    Required for proper codegen of final assignment performed by the
  //    lastprivate clause.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Optional lastprivate kind, e.g. 'conditional', if specified by user.
  OpenMPLastprivateModifier LPKind;
  /// Optional location of the lastprivate kind, if specified by user.
  SourceLocation LPKindLoc;
  /// Optional colon location, if specified by user.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param LPKind Lastprivate kind, e.g. 'conditional'.
  /// \param LPKindLoc Location of the lastprivate kind.
  /// \param ColonLoc Location of the ':' separating the kind from the list.
  /// \param N Number of the variables in the clause.
  OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, OpenMPLastprivateModifier LPKind,
                       SourceLocation LPKindLoc, SourceLocation ColonLoc,
                       unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc,
                                               LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), LPKind(LPKind), LPKindLoc(LPKindLoc),
        ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPLastprivateClause(unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(
            OMPC_lastprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Get the list of helper expressions for initialization of private
  /// copies for lastprivate variables (trailing array #1, directly after
  /// the variable list).
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions (trailing array #2).
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent original variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions (trailing array #3).
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign private copy of the variable to original variable.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions (trailing array #4).
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

  /// Sets lastprivate kind.
  void setKind(OpenMPLastprivateModifier Kind) { LPKind = Kind; }
  /// Sets location of the lastprivate kind.
  void setKindLoc(SourceLocation Loc) { LPKindLoc = Loc; }
  /// Sets colon symbol location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list represents
  /// private variables (for arrays, single array element).
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list represents
  /// original variables (for arrays, single array element).
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// lastprivate clause.
  /// \param LPKind Lastprivate kind, e.g. 'conditional'.
  /// \param LPKindLoc Location of the lastprivate kind.
  /// \param ColonLoc Location of the ':' symbol if lastprivate kind is used.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPLastprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps,
         OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc,
         SourceLocation ColonLoc, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Lastprivate kind.
  OpenMPLastprivateModifier getKind() const { return LPKind; }
  /// Returns the location of the lastprivate kind.
  SourceLocation getKindLoc() const { return LPKindLoc; }
  /// Returns the location of the ':' symbol, if any.
  SourceLocation getColonLoc() const { return ColonLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  /// Set list of helper expressions, required for generation of private
  /// copies of original lastprivate variables.
  void setPrivateCopies(ArrayRef<Expr *> PrivateCopies);

  /// Range over the helper expressions for the private copies.
  helper_expr_const_range private_copies() const {
    return helper_expr_const_range(getPrivateCopies().begin(),
                                   getPrivateCopies().end());
  }
  helper_expr_range private_copies() {
    return helper_expr_range(getPrivateCopies().begin(),
                             getPrivateCopies().end());
  }

  /// Range over the helper source expressions.
  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }

  /// Range over the helper destination expressions.
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }

  /// Range over the helper assignment expressions.
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  /// The children are the references to the original variables.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPLastprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  /// Empty range: no children are reported as used.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// Supports LLVM-style RTTI (isa/cast/dyn_cast) on OMPClause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_lastprivate;
  }
};
/// This represents clause 'shared' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel shared(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'shared'
/// with the variables 'a' and 'b'.
class OMPSharedClause final
    : public OMPVarListClause<OMPSharedClause>,
      private llvm::TrailingObjects<OMPSharedClause, Expr *> {
  // The only tail-allocated data is the variable list managed by
  // OMPVarListClause; no extra helper arrays are needed for 'shared'.
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPSharedClause>(OMPC_shared, StartLoc, LParenLoc,
                                          EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPSharedClause(unsigned N)
      : OMPVarListClause<OMPSharedClause>(OMPC_shared, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// The children are the references to the shared variables.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPSharedClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  /// Empty range: no children are reported as used.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// Supports LLVM-style RTTI (isa/cast/dyn_cast) on OMPClause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_shared;
  }
};
/// This represents clause 'reduction' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
class OMPReductionClause final
: public OMPVarListClause<OMPReductionClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPReductionClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Location of ':'.
SourceLocation ColonLoc;
/// Nested name specifier for C++.
NestedNameSpecifierLoc QualifierLoc;
/// Name of custom operator.
DeclarationNameInfo NameInfo;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param ColonLoc Location of ':'.
/// \param N Number of the variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo)
: OMPVarListClause<OMPReductionClause>(OMPC_reduction, StartLoc,
LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPReductionClause(unsigned N)
: OMPVarListClause<OMPReductionClause>(OMPC_reduction, SourceLocation(),
SourceLocation(), SourceLocation(),
N),
OMPClauseWithPostUpdate(this) {}
/// Sets location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
/// Sets the name info for specified reduction identifier.
void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }
/// Sets the nested name specifier.
void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent private copy of the reduction
/// variable.
void setPrivates(ArrayRef<Expr *> Privates);
/// Get the list of helper privates.
MutableArrayRef<Expr *> getPrivates() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent LHS expression in the final
/// reduction expression performed by the reduction clause.
void setLHSExprs(ArrayRef<Expr *> LHSExprs);
/// Get the list of helper LHS expressions.
MutableArrayRef<Expr *> getLHSExprs() {
return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getLHSExprs() const {
return llvm::makeArrayRef(getPrivates().end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent RHS expression in the final
/// reduction expression performed by the reduction clause.
/// Also, variables in these expressions are used for proper initialization of
/// reduction copies.
void setRHSExprs(ArrayRef<Expr *> RHSExprs);
/// Get the list of helper destination expressions.
MutableArrayRef<Expr *> getRHSExprs() {
return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getRHSExprs() const {
return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
}
/// Set list of helper reduction expressions, required for proper
/// codegen of the clause. These expressions are binary expressions or
/// operator/custom reduction call that calculates new value from source
/// helper expressions to destination helper expressions.
void setReductionOps(ArrayRef<Expr *> ReductionOps);
/// Get the list of helper reduction expressions.
MutableArrayRef<Expr *> getReductionOps() {
return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getReductionOps() const {
return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL The variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
/// \param Privates List of helper expressions for proper generation of
/// private copies.
/// \param LHSExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// LHSs of the reduction expressions.
/// \param RHSExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// RHSs of the reduction expressions.
/// Also, variables in these expressions are used for proper initialization of
/// reduction copies.
/// \param ReductionOps List of helper expressions that represents reduction
/// expressions:
/// \code
/// LHSExprs binop RHSExprs;
/// operator binop(LHSExpr, RHSExpr);
/// <CutomReduction>(LHSExpr, RHSExpr);
/// \endcode
/// Required for proper codegen of final reduction operation performed by the
/// reduction clause.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPReductionClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPReductionClause *CreateEmpty(const ASTContext &C, unsigned N);
/// Gets location of ':' symbol in clause.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Gets the name info for specified reduction identifier.
const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
/// Gets the nested name specifier.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
helper_expr_const_range privates() const {
return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_range privates() {
return helper_expr_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_const_range lhs_exprs() const {
return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_range lhs_exprs() {
return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_const_range rhs_exprs() const {
return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_range rhs_exprs() {
return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_const_range reduction_ops() const {
return helper_expr_const_range(getReductionOps().begin(),
getReductionOps().end());
}
helper_expr_range reduction_ops() {
return helper_expr_range(getReductionOps().begin(),
getReductionOps().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPReductionClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range used_children() const {
auto Children = const_cast<OMPReductionClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_reduction;
}
};
/// This represents clause 'task_reduction' in the '#pragma omp taskgroup'
/// directives.
///
/// \code
/// #pragma omp taskgroup task_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp taskgroup' has clause
/// 'task_reduction' with operator '+' and the variables 'a' and 'b'.
class OMPTaskReductionClause final
    : public OMPVarListClause<OMPTaskReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> {
  // In addition to the variable list (see OMPVarListClause), 4 helper arrays
  // of the same size are tail-allocated, in this order:
  //   { Vars[]; Privates[]; LHSExprs[]; RHSExprs[]; ReductionOps[]; }
  // Each get*() accessor below derives its array's position from the end of
  // the previous one, so the accessors' relative order encodes this memory
  // layout and must not be changed.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information.
  /// \param NameInfo The full name info for reduction identifier.
  OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                         SourceLocation ColonLoc, SourceLocation EndLoc,
                         unsigned N, NestedNameSpecifierLoc QualifierLoc,
                         const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPTaskReductionClause>(OMPC_task_reduction, StartLoc,
                                                 LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPTaskReductionClause(unsigned N)
      : OMPVarListClause<OMPTaskReductionClause>(
            OMPC_task_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates (first tail array, directly after the
  /// variable list).
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final reduction
  /// expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions (second tail array).
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final reduction
  /// expression performed by the reduction clause. Also, variables in these
  /// expressions are used for proper initialization of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions (third tail array).
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions (fourth tail array).
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information.
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of the
  /// assignment operation required for the reduction clause. This list
  /// represents LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of the
  /// assignment operation required for the reduction clause. This list
  /// represents RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPTaskReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPTaskReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // No children are reported as used by this clause (empty range).
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_task_reduction;
  }
};
/// This represents clause 'in_reduction' in the '#pragma omp task' directives.
///
/// \code
/// #pragma omp task in_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'in_reduction' with
/// operator '+' and the variables 'a' and 'b'.
class OMPInReductionClause final
    : public OMPVarListClause<OMPInReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPInReductionClause, Expr *> {
  // In addition to the variable list (see OMPVarListClause), 5 helper arrays
  // of the same size are tail-allocated, in this order:
  //   { Vars[]; Privates[]; LHSExprs[]; RHSExprs[]; ReductionOps[];
  //     TaskgroupDescriptors[]; }
  // Each get*() accessor below derives its array's position from the end of
  // the previous one, so the accessors' relative order encodes this memory
  // layout and must not be changed.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information.
  /// \param NameInfo The full name info for reduction identifier.
  OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation ColonLoc, SourceLocation EndLoc,
                       unsigned N, NestedNameSpecifierLoc QualifierLoc,
                       const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPInReductionClause>(OMPC_in_reduction, StartLoc,
                                               LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPInReductionClause(unsigned N)
      : OMPVarListClause<OMPInReductionClause>(
            OMPC_in_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates (first tail array, directly after the
  /// variable list).
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final reduction
  /// expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions (second tail array).
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final reduction
  /// expression performed by the reduction clause. Also, variables in these
  /// expressions are used for proper initialization of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions (third tail array).
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions (fourth tail array).
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction taskgroup descriptors.
  void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction taskgroup descriptors (fifth tail
  /// array).
  MutableArrayRef<Expr *> getTaskgroupDescriptors() {
    return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getTaskgroupDescriptors() const {
    return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information.
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of the
  /// assignment operation required for the reduction clause. This list
  /// represents LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of the
  /// assignment operation required for the reduction clause. This list
  /// represents RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param TaskgroupDescriptors List of helper taskgroup descriptors for
  /// corresponding items in parent taskgroup task_reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPInReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> TaskgroupDescriptors,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }
  helper_expr_const_range taskgroup_descriptors() const {
    return helper_expr_const_range(getTaskgroupDescriptors().begin(),
                                   getTaskgroupDescriptors().end());
  }
  helper_expr_range taskgroup_descriptors() {
    return helper_expr_range(getTaskgroupDescriptors().begin(),
                             getTaskgroupDescriptors().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPInReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // No children are reported as used by this clause (empty range).
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_in_reduction;
  }
};
/// This represents clause 'linear' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd linear(a,b : 2)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
class OMPLinearClause final
    : public OMPVarListClause<OMPLinearClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLinearClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Modifier of 'linear' clause.
  OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val;

  /// Location of linear modifier if any.
  SourceLocation ModifierLoc;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Sets the linear step for clause. The step lives in the first extra slot
  /// directly past the Finals[] array (see the layout comment below).
  void setStep(Expr *Step) { *(getFinals().end()) = Step; }

  /// Sets the expression to calculate linear step for clause (second extra
  /// slot past Finals[]).
  void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; }

  /// Build 'linear' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Modifier Modifier of 'linear' clause.
  /// \param ModifierLoc Location of the linear modifier, if any.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
                  SourceLocation ColonLoc, SourceLocation EndLoc,
                  unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(OMPC_linear, StartLoc, LParenLoc,
                                          EndLoc, NumVars),
        OMPClauseWithPostUpdate(this), Modifier(Modifier),
        ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  explicit OMPLinearClause(unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(OMPC_linear, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          NumVars),
        OMPClauseWithPostUpdate(this) {}

  /// Gets the list of initial values for linear variables.
  ///
  /// There are NumVars expressions with initial values allocated after the
  /// varlist, they are followed by NumVars update expressions (used to update
  /// the linear variable's value on current iteration) and they are followed
  /// by NumVars final expressions (used to calculate the linear variable's
  /// value after the loop body). After these lists, there are 2 helper
  /// expressions - linear step and a helper to calculate it before the
  /// loop body (used when the linear step is not constant):
  ///
  /// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[];
  ///   Finals[]; Step; CalcStep; }
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Gets the list of update expressions for linear variables.
  MutableArrayRef<Expr *> getUpdates() {
    return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
  }
  ArrayRef<const Expr *> getUpdates() const {
    return llvm::makeArrayRef(getInits().end(), varlist_size());
  }

  /// Gets the list of final update expressions for linear variables.
  MutableArrayRef<Expr *> getFinals() {
    return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getFinals() const {
    return llvm::makeArrayRef(getUpdates().end(), varlist_size());
  }

  /// Gets the list of used expressions for linear variables. The '+ 2' skips
  /// the Step and CalcStep slots stored right after Finals[]; the list holds
  /// varlist_size() + 1 expressions.
  MutableArrayRef<Expr *> getUsedExprs() {
    return MutableArrayRef<Expr *>(getFinals().end() + 2, varlist_size() + 1);
  }
  ArrayRef<const Expr *> getUsedExprs() const {
    return llvm::makeArrayRef(getFinals().end() + 2, varlist_size() + 1);
  }

  /// Sets the list of the copies of original linear variables.
  /// \param PL List of expressions.
  void setPrivates(ArrayRef<Expr *> PL);

  /// Sets the list of the initial values for linear variables.
  /// \param IL List of expressions.
  void setInits(ArrayRef<Expr *> IL);

public:
  /// Creates clause with a list of variables \a VL and a linear step
  /// \a Step.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Modifier Modifier of 'linear' clause.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PL List of private copies of original variables.
  /// \param IL List of initial values for the variables.
  /// \param Step Linear step.
  /// \param CalcStep Calculation of the linear step.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPLinearClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// Set modifier.
  void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; }

  /// Return modifier.
  OpenMPLinearClauseKind getModifier() const { return Modifier; }

  /// Set modifier location.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

  /// Return modifier location.
  SourceLocation getModifierLoc() const { return ModifierLoc; }

  /// Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Returns linear step.
  Expr *getStep() { return *(getFinals().end()); }

  /// Returns linear step.
  const Expr *getStep() const { return *(getFinals().end()); }

  /// Returns expression to calculate linear step.
  Expr *getCalcStep() { return *(getFinals().end() + 1); }

  /// Returns expression to calculate linear step.
  const Expr *getCalcStep() const { return *(getFinals().end() + 1); }

  /// Sets the list of update expressions for linear variables.
  /// \param UL List of expressions.
  void setUpdates(ArrayRef<Expr *> UL);

  /// Sets the list of final update expressions for linear variables.
  /// \param FL List of expressions.
  void setFinals(ArrayRef<Expr *> FL);

  /// Sets the list of used expressions for the linear clause.
  void setUsedExprs(ArrayRef<Expr *> UE);

  using privates_iterator = MutableArrayRef<Expr *>::iterator;
  using privates_const_iterator = ArrayRef<const Expr *>::iterator;
  using privates_range = llvm::iterator_range<privates_iterator>;
  using privates_const_range = llvm::iterator_range<privates_const_iterator>;

  privates_range privates() {
    return privates_range(getPrivates().begin(), getPrivates().end());
  }
  privates_const_range privates() const {
    return privates_const_range(getPrivates().begin(), getPrivates().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  using updates_iterator = MutableArrayRef<Expr *>::iterator;
  using updates_const_iterator = ArrayRef<const Expr *>::iterator;
  using updates_range = llvm::iterator_range<updates_iterator>;
  using updates_const_range = llvm::iterator_range<updates_const_iterator>;

  updates_range updates() {
    return updates_range(getUpdates().begin(), getUpdates().end());
  }
  updates_const_range updates() const {
    return updates_const_range(getUpdates().begin(), getUpdates().end());
  }

  using finals_iterator = MutableArrayRef<Expr *>::iterator;
  using finals_const_iterator = ArrayRef<const Expr *>::iterator;
  using finals_range = llvm::iterator_range<finals_iterator>;
  using finals_const_range = llvm::iterator_range<finals_const_iterator>;

  finals_range finals() {
    return finals_range(getFinals().begin(), getFinals().end());
  }
  finals_const_range finals() const {
    return finals_const_range(getFinals().begin(), getFinals().end());
  }

  using used_expressions_iterator = MutableArrayRef<Expr *>::iterator;
  using used_expressions_const_iterator = ArrayRef<const Expr *>::iterator;
  using used_expressions_range =
      llvm::iterator_range<used_expressions_iterator>;
  using used_expressions_const_range =
      llvm::iterator_range<used_expressions_const_iterator>;

  // Fixed: construct the dedicated used_expressions_(const_)range aliases
  // instead of the finals_(const_)range aliases that were used by mistake
  // (the aliases denote the same underlying type, so behavior is unchanged).
  used_expressions_range used_expressions() {
    return used_expressions_range(getUsedExprs().begin(), getUsedExprs().end());
  }
  used_expressions_const_range used_expressions() const {
    return used_expressions_const_range(getUsedExprs().begin(),
                                        getUsedExprs().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPLinearClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  /// Defined out of line (non-trivial for this clause).
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPLinearClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_linear;
  }
};
/// This represents clause 'aligned' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd aligned(a,b : 8)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
class OMPAlignedClause final
    : public OMPVarListClause<OMPAlignedClause>,
      private llvm::TrailingObjects<OMPAlignedClause, Expr *> {
  // One extra Expr * slot is tail-allocated immediately after the variable
  // list; it holds the alignment expression and is accessed through
  // *varlist_end() (see setAlignment/getAlignment).
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Sets the alignment for clause (stored in the slot past the varlist).
  void setAlignment(Expr *A) { *varlist_end() = A; }

  /// Build 'aligned' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation ColonLoc, SourceLocation EndLoc,
                   unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, StartLoc, LParenLoc,
                                           EndLoc, NumVars),
        ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  explicit OMPAlignedClause(unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, SourceLocation(),
                                           SourceLocation(), SourceLocation(),
                                           NumVars) {}

public:
  /// Creates clause with a list of variables \a VL and alignment \a A.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param A Alignment.
  static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation ColonLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  Expr *A);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Returns alignment.
  Expr *getAlignment() { return *varlist_end(); }

  /// Returns alignment.
  const Expr *getAlignment() const { return *varlist_end(); }

  // NOTE(review): children() exposes only the variable list; the alignment
  // expression slot past varlist_end() is not included — confirm intentional.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPAlignedClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // No children are reported as used by this clause (empty range).
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_aligned;
  }
};
/// This represents clause 'copyin' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel copyin(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
class OMPCopyinClause final
    : public OMPVarListClause<OMPCopyinClause>,
      private llvm::TrailingObjects<OMPCopyinClause, Expr *> {
  // Class has 3 additional tail allocated arrays:
  // 1. List of helper expressions for proper generation of assignment operation
  //    required for copyin clause. This list represents sources.
  // 2. List of helper expressions for proper generation of assignment operation
  //    required for copyin clause. This list represents destinations.
  // 3. List of helper expressions that represents assignment operation:
  //    \code
  //    DstExprs = SrcExprs;
  //    \endcode
  // Required for proper codegen of propagation of master's thread values of
  // threadprivate variables to local instances of that variables in other
  // implicit threads.
  //
  // Trailing storage therefore holds 4 * varlist_size() expressions laid out
  // consecutively: variables, sources, destinations, assignment ops; the
  // accessors below compute each sub-array's start from the previous one.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, StartLoc, LParenLoc,
                                          EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyinClause(unsigned N)
      : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N) {}

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyin clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  /// Sources start right after the variable list in the trailing storage.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyin clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  /// Destinations follow the source-expression sub-array.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
  /// Assignment ops follow the destination-expression sub-array.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of propagation of master's thread values of
  /// threadprivate variables to local instances of that variables in other
  /// implicit threads.
  static OMPCopyinClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);

  // Iterator aliases over the three helper-expression sub-arrays.
  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }

  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }

  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }

  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }

  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }

  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  // Only the variable list is exposed as children; the helper sub-arrays
  // past varlist_end() are not part of this range.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPCopyinClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_copyin;
  }
};
/// This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
class OMPCopyprivateClause final
    : public OMPVarListClause<OMPCopyprivateClause>,
      private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
  // Same trailing layout as OMPCopyinClause: the variable list is followed by
  // three equally-sized helper sub-arrays (sources, destinations, assignment
  // ops), 4 * varlist_size() Expr* in total.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(OMPC_copyprivate, StartLoc,
                                               LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyprivateClause(unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(
            OMPC_copyprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  /// Sources start right after the variable list in the trailing storage.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  /// Destinations follow the source-expression sub-array.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
  /// Assignment ops follow the destination-expression sub-array.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// copyprivate clause.
  static OMPCopyprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  // Iterator aliases over the three helper-expression sub-arrays.
  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }

  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }

  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }

  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }

  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }

  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  // Only the variable list is exposed as children; the helper sub-arrays
  // past varlist_end() are not part of this range.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPCopyprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_copyprivate;
  }
};
/// This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// flush' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
class OMPFlushClause final
    : public OMPVarListClause<OMPFlushClause>,
      private llvm::TrailingObjects<OMPFlushClause, Expr *> {
  // Pure variable-list clause: the only trailing storage is the variable
  // list itself, with no extra helper expressions or state.
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFlushClause>(OMPC_flush, StartLoc, LParenLoc,
                                         EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPFlushClause(unsigned N)
      : OMPVarListClause<OMPFlushClause>(OMPC_flush, SourceLocation(),
                                         SourceLocation(), SourceLocation(),
                                         N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation LParenLoc, SourceLocation EndLoc,
                                ArrayRef<Expr *> VL);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFlushClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_flush;
  }
};
/// This represents implicit clause 'depend' for the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' with clause 'depend' with the
/// variables 'a' and 'b' with dependency 'in'.
class OMPDependClause final
    : public OMPVarListClause<OMPDependClause>,
      private llvm::TrailingObjects<OMPDependClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Dependency type (one of in, out, inout).
  OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;

  /// Dependency type location.
  SourceLocation DepLoc;

  /// Colon location.
  SourceLocation ColonLoc;

  /// Number of loops, associated with the depend clause.
  unsigned NumLoops = 0;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N, unsigned NumLoops)
      : OMPVarListClause<OMPDependClause>(OMPC_depend, StartLoc, LParenLoc,
                                          EndLoc, N), NumLoops(NumLoops) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  explicit OMPDependClause(unsigned N, unsigned NumLoops)
      : OMPVarListClause<OMPDependClause>(OMPC_depend, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N),
        NumLoops(NumLoops) {}

  /// Set dependency kind.
  void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }

  /// Set dependency kind and its location.
  void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param DepKind Dependency type.
  /// \param DepLoc Location of the dependency type.
  /// \param ColonLoc Colon location.
  /// \param VL List of references to the variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc,
                                 OpenMPDependClauseKind DepKind,
                                 SourceLocation DepLoc, SourceLocation ColonLoc,
                                 ArrayRef<Expr *> VL, unsigned NumLoops);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N,
                                      unsigned NumLoops);

  /// Get dependency type.
  OpenMPDependClauseKind getDependencyKind() const { return DepKind; }

  /// Get dependency type location.
  SourceLocation getDependencyLoc() const { return DepLoc; }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Get number of loops associated with the clause.
  unsigned getNumLoops() const { return NumLoops; }

  /// Set the loop data for the depend clauses with 'sink|source' kind of
  /// dependency.
  /// NOTE(review): defined out of line; the loop-data expressions presumably
  /// live in the trailing Expr* storage after the variable list — confirm
  /// against the implementation in the .cpp.
  void setLoopData(unsigned NumLoop, Expr *Cnt);

  /// Get the loop data.
  Expr *getLoopData(unsigned NumLoop);
  const Expr *getLoopData(unsigned NumLoop) const;

  // Only the variable list is exposed as children.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDependClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_depend;
  }
};
/// This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Device number.
  Stmt *Device = nullptr;

  /// Set the device number.
  ///
  /// \param E Device number.
  void setDevice(Expr *E) { Device = E; }

public:
  /// Build 'device' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDeviceClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
                  SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : OMPClause(OMPC_device, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Device(E) {
    // Record the helper pre-init statement together with the region in which
    // the device expression must be captured.
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPDeviceClause()
      : OMPClause(OMPC_device, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return device number.
  Expr *getDevice() { return cast<Expr>(Device); }

  /// Return device number.
  // NOTE(review): the const overload intentionally returns a non-const
  // Expr * (matching the non-const overload); changing it would break
  // existing callers.
  Expr *getDevice() const { return cast<Expr>(Device); }

  child_range children() { return child_range(&Device, &Device + 1); }

  const_child_range children() const {
    return const_child_range(&Device, &Device + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_device;
  }
};
/// This represents 'threads' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered threads
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'threads' clause.
///
/// A simple flag-like clause: it carries no expressions or variables, only
/// source locations, so all child ranges below are empty.
class OMPThreadsClause : public OMPClause {
public:
  /// Build 'threads' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_threads, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPThreadsClause()
      : OMPClause(OMPC_threads, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_threads;
  }
};
/// This represents 'simd' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered simd
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'simd' clause.
///
/// A simple flag-like clause: it carries no expressions or variables, only
/// source locations, so all child ranges below are empty.
class OMPSIMDClause : public OMPClause {
public:
  /// Build 'simd' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_simd, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPSIMDClause() : OMPClause(OMPC_simd, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_simd;
  }
};
/// Struct that defines common infrastructure to handle mappable
/// expressions used in OpenMP clauses.
class OMPClauseMappableExprCommon {
public:
  /// Class that represents a component of a mappable expression. E.g.
  /// for an expression S.a, the first component is a declaration reference
  /// expression associated with 'S' and the second is a member expression
  /// associated with the field declaration 'a'. If the expression is an array
  /// subscript it may not have any associated declaration. In that case the
  /// associated declaration is set to nullptr.
  class MappableComponent {
    /// Expression associated with the component.
    Expr *AssociatedExpression = nullptr;

    /// Declaration associated with the declaration. If the component does
    /// not have a declaration (e.g. array subscripts or section), this is set
    /// to nullptr.
    /// The constructor normalizes any non-null declaration to its canonical
    /// form so components can be compared by declaration identity.
    ValueDecl *AssociatedDeclaration = nullptr;

  public:
    explicit MappableComponent() = default;

    explicit MappableComponent(Expr *AssociatedExpression,
                               ValueDecl *AssociatedDeclaration)
        : AssociatedExpression(AssociatedExpression),
          AssociatedDeclaration(
              AssociatedDeclaration
                  ? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl())
                  : nullptr) {}

    Expr *getAssociatedExpression() const { return AssociatedExpression; }

    ValueDecl *getAssociatedDeclaration() const {
      return AssociatedDeclaration;
    }
  };

  // List of components of an expression. This first one is the whole
  // expression and the last one is the base expression.
  using MappableExprComponentList = SmallVector<MappableComponent, 8>;
  using MappableExprComponentListRef = ArrayRef<MappableComponent>;

  // List of all component lists associated to the same base declaration.
  // E.g. if both 'S.a' and 'S.b' are a mappable expressions, each will have
  // their component list but the same base declaration 'S'.
  using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>;
  using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>;

protected:
  // Return the total number of elements in a list of component lists.
  static unsigned
  getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);

  // Return the total number of elements in a list of declarations. All
  // declarations are expected to be canonical.
  static unsigned
  getUniqueDeclarationsTotalNumber(ArrayRef<const ValueDecl *> Declarations);
};
/// This structure contains all sizes needed for by an
/// OMPMappableExprListClause.
struct OMPMappableExprListSizeTy {
  /// Number of expressions listed.
  unsigned NumVars;

  /// Number of unique base declarations.
  unsigned NumUniqueDeclarations;

  /// Number of component lists.
  unsigned NumComponentLists;

  /// Total number of expression components.
  unsigned NumComponents;

  // NOTE(review): the defaulted constructor leaves all four counts
  // uninitialized — presumably callers always fill them in (e.g. during
  // deserialization) before use; confirm against callers.
  OMPMappableExprListSizeTy() = default;
  OMPMappableExprListSizeTy(unsigned NumVars, unsigned NumUniqueDeclarations,
                            unsigned NumComponentLists, unsigned NumComponents)
      : NumVars(NumVars), NumUniqueDeclarations(NumUniqueDeclarations),
        NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}
};
/// This represents clauses with a list of expressions that are mappable.
/// Examples of these clauses are 'map' in
/// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from
/// in '#pragma omp target update...' directives.
template <class T>
class OMPMappableExprListClause : public OMPVarListClause<T>,
public OMPClauseMappableExprCommon {
friend class OMPClauseReader;
/// Number of unique declarations in this clause.
unsigned NumUniqueDeclarations;
/// Number of component lists in this clause.
unsigned NumComponentLists;
/// Total number of components in this clause.
unsigned NumComponents;
/// C++ nested name specifier for the associated user-defined mapper.
NestedNameSpecifierLoc MapperQualifierLoc;
/// The associated user-defined mapper identifier information.
DeclarationNameInfo MapperIdInfo;
protected:
/// Build a clause for \a NumUniqueDeclarations declarations, \a
/// NumComponentLists total component lists, and \a NumComponents total
/// components.
///
/// \param K Kind of the clause.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
/// \param MapperQualifierLocPtr C++ nested name specifier for the associated
/// user-defined mapper.
/// \param MapperIdInfoPtr The identifier of associated user-defined mapper.
OMPMappableExprListClause(
OpenMPClauseKind K, const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes,
NestedNameSpecifierLoc *MapperQualifierLocPtr = nullptr,
DeclarationNameInfo *MapperIdInfoPtr = nullptr)
: OMPVarListClause<T>(K, Locs.StartLoc, Locs.LParenLoc, Locs.EndLoc,
Sizes.NumVars),
NumUniqueDeclarations(Sizes.NumUniqueDeclarations),
NumComponentLists(Sizes.NumComponentLists),
NumComponents(Sizes.NumComponents) {
if (MapperQualifierLocPtr)
MapperQualifierLoc = *MapperQualifierLocPtr;
if (MapperIdInfoPtr)
MapperIdInfo = *MapperIdInfoPtr;
}
/// Get the unique declarations that are in the trailing objects of the
/// class.
MutableArrayRef<ValueDecl *> getUniqueDeclsRef() {
return MutableArrayRef<ValueDecl *>(
static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(),
NumUniqueDeclarations);
}
/// Get the unique declarations that are in the trailing objects of the
/// class.
ArrayRef<ValueDecl *> getUniqueDeclsRef() const {
return ArrayRef<ValueDecl *>(
static_cast<const T *>(this)
->template getTrailingObjects<ValueDecl *>(),
NumUniqueDeclarations);
}
/// Set the unique declarations that are in the trailing objects of the
/// class.
void setUniqueDecls(ArrayRef<ValueDecl *> UDs) {
assert(UDs.size() == NumUniqueDeclarations &&
"Unexpected amount of unique declarations.");
std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin());
}
/// Get the number of lists per declaration that are in the trailing
/// objects of the class.
MutableArrayRef<unsigned> getDeclNumListsRef() {
return MutableArrayRef<unsigned>(
static_cast<T *>(this)->template getTrailingObjects<unsigned>(),
NumUniqueDeclarations);
}
/// Get the number of lists per declaration that are in the trailing
/// objects of the class.
ArrayRef<unsigned> getDeclNumListsRef() const {
return ArrayRef<unsigned>(
static_cast<const T *>(this)->template getTrailingObjects<unsigned>(),
NumUniqueDeclarations);
}
/// Set the number of lists per declaration that are in the trailing
/// objects of the class.
void setDeclNumLists(ArrayRef<unsigned> DNLs) {
assert(DNLs.size() == NumUniqueDeclarations &&
"Unexpected amount of list numbers.");
std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin());
}
/// Get the cumulative component lists sizes that are in the trailing
/// objects of the class. They are appended after the number of lists.
MutableArrayRef<unsigned> getComponentListSizesRef() {
return MutableArrayRef<unsigned>(
static_cast<T *>(this)->template getTrailingObjects<unsigned>() +
NumUniqueDeclarations,
NumComponentLists);
}
/// Get the cumulative component lists sizes that are in the trailing
/// objects of the class. They are appended after the number of lists.
ArrayRef<unsigned> getComponentListSizesRef() const {
return ArrayRef<unsigned>(
static_cast<const T *>(this)->template getTrailingObjects<unsigned>() +
NumUniqueDeclarations,
NumComponentLists);
}
/// Set the cumulative component lists sizes that are in the trailing
/// objects of the class.
void setComponentListSizes(ArrayRef<unsigned> CLSs) {
assert(CLSs.size() == NumComponentLists &&
"Unexpected amount of component lists.");
std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin());
}
/// Get the components that are in the trailing objects of the class.
MutableArrayRef<MappableComponent> getComponentsRef() {
return MutableArrayRef<MappableComponent>(
static_cast<T *>(this)
->template getTrailingObjects<MappableComponent>(),
NumComponents);
}
/// Get the components that are in the trailing objects of the class.
ArrayRef<MappableComponent> getComponentsRef() const {
return ArrayRef<MappableComponent>(
static_cast<const T *>(this)
->template getTrailingObjects<MappableComponent>(),
NumComponents);
}
/// Set the components that are in the trailing objects of the class.
/// This requires the list sizes so that it can also fill the original
/// expressions, which are the first component of each list.
void setComponents(ArrayRef<MappableComponent> Components,
ArrayRef<unsigned> CLSs) {
assert(Components.size() == NumComponents &&
"Unexpected amount of component lists.");
assert(CLSs.size() == NumComponentLists &&
"Unexpected amount of list sizes.");
std::copy(Components.begin(), Components.end(), getComponentsRef().begin());
}
  /// Fill the clause information from the list of declarations and
  /// associated component lists.
  ///
  /// \param Declarations Base declaration of each captured expression; may
  /// contain duplicates, which are grouped together below.
  /// \param ComponentLists One component list per entry of \a Declarations.
  void setClauseInfo(ArrayRef<ValueDecl *> Declarations,
                     MappableExprComponentListsRef ComponentLists) {
    // Perform some checks to make sure the data sizes are consistent with the
    // information available when the clause was created.
    assert(getUniqueDeclarationsTotalNumber(Declarations) ==
               NumUniqueDeclarations &&
           "Unexpected number of mappable expression info entries!");
    assert(getComponentsTotalNumber(ComponentLists) == NumComponents &&
           "Unexpected total number of components!");
    assert(Declarations.size() == ComponentLists.size() &&
           "Declaration and component lists size is not consistent!");
    assert(Declarations.size() == NumComponentLists &&
           "Unexpected declaration and component lists size!");
    // Organize the components by declaration and retrieve the original
    // expression. Original expressions are always the first component of the
    // mappable component list.
    // MapVector keeps insertion order, so the storage below is written in the
    // order declarations were first seen.
    llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>>
        ComponentListMap;
    {
      auto CI = ComponentLists.begin();
      for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE;
           ++DI, ++CI) {
        assert(!CI->empty() && "Invalid component list!");
        ComponentListMap[*DI].push_back(*CI);
      }
    }
    // Iterators of the target storage.
    auto UniqueDeclarations = getUniqueDeclsRef();
    auto UDI = UniqueDeclarations.begin();
    auto DeclNumLists = getDeclNumListsRef();
    auto DNLI = DeclNumLists.begin();
    auto ComponentListSizes = getComponentListSizesRef();
    auto CLSI = ComponentListSizes.begin();
    auto Components = getComponentsRef();
    auto CI = Components.begin();
    // Variable to compute the accumulation of the number of components.
    unsigned PrevSize = 0u;
    // Scan all the declarations and associated component lists.
    for (auto &M : ComponentListMap) {
      // The declaration.
      auto *D = M.first;
      // The component lists.
      auto CL = M.second;
      // Initialize the entry.
      *UDI = D;
      ++UDI;
      *DNLI = CL.size();
      ++DNLI;
      // Obtain the cumulative sizes and concatenate all the components in the
      // reserved storage.
      for (auto C : CL) {
        // Accumulate with the previous size.
        PrevSize += C.size();
        // Save the size. Sizes are cumulative across ALL lists, not per
        // declaration — consumers recover a list's length by subtracting the
        // previous cumulative value.
        *CLSI = PrevSize;
        ++CLSI;
        // Append components after the current components iterator.
        CI = std::copy(C.begin(), C.end(), CI);
      }
    }
  }
/// Set the nested name specifier of associated user-defined mapper.
void setMapperQualifierLoc(NestedNameSpecifierLoc NNSL) {
MapperQualifierLoc = NNSL;
}
/// Set the name of associated user-defined mapper.
void setMapperIdInfo(DeclarationNameInfo MapperId) {
MapperIdInfo = MapperId;
}
/// Get the user-defined mapper references that are in the trailing objects of
/// the class.
MutableArrayRef<Expr *> getUDMapperRefs() {
return llvm::makeMutableArrayRef<Expr *>(
static_cast<T *>(this)->template getTrailingObjects<Expr *>() +
OMPVarListClause<T>::varlist_size(),
OMPVarListClause<T>::varlist_size());
}
/// Get the user-defined mappers references that are in the trailing objects
/// of the class.
ArrayRef<Expr *> getUDMapperRefs() const {
return llvm::makeArrayRef<Expr *>(
static_cast<T *>(this)->template getTrailingObjects<Expr *>() +
OMPVarListClause<T>::varlist_size(),
OMPVarListClause<T>::varlist_size());
}
/// Set the user-defined mappers that are in the trailing objects of the
/// class.
void setUDMapperRefs(ArrayRef<Expr *> DMDs) {
assert(DMDs.size() == OMPVarListClause<T>::varlist_size() &&
"Unexpected number of user-defined mappers.");
std::copy(DMDs.begin(), DMDs.end(), getUDMapperRefs().begin());
}
public:
  /// Return the number of unique base declarations in this clause.
  unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; }
  /// Return the number of lists derived from the clause expressions.
  unsigned getTotalComponentListNum() const { return NumComponentLists; }
  /// Return the total number of components in all lists derived from the
  /// clause.
  unsigned getTotalComponentsNum() const { return NumComponents; }
  /// Gets the nested name specifier for associated user-defined mapper.
  NestedNameSpecifierLoc getMapperQualifierLoc() const {
    return MapperQualifierLoc;
  }
  /// Gets the name info for associated user-defined mapper.
  const DeclarationNameInfo &getMapperIdInfo() const { return MapperIdInfo; }
  /// Iterator that browse the components by lists. It also allows
  /// browsing components of a single declaration.
  ///
  /// Dereferencing yields a (declaration, component-list) pair; operator++
  /// advances one whole component list at a time, moving to the next
  /// declaration when the current one's lists are exhausted.
  class const_component_lists_iterator
      : public llvm::iterator_adaptor_base<
            const_component_lists_iterator,
            MappableExprComponentListRef::const_iterator,
            std::forward_iterator_tag, MappableComponent, ptrdiff_t,
            MappableComponent, MappableComponent> {
    // The declaration the iterator currently refers to.
    ArrayRef<ValueDecl *>::iterator DeclCur;
    // The list number associated with the current declaration.
    ArrayRef<unsigned>::iterator NumListsCur;
    // Remaining lists for the current declaration.
    unsigned RemainingLists = 0;
    // The cumulative size of the previous list, or zero if there is no previous
    // list.
    unsigned PrevListSize = 0;
    // The cumulative sizes of the current list - it will delimit the remaining
    // range of interest.
    ArrayRef<unsigned>::const_iterator ListSizeCur;
    ArrayRef<unsigned>::const_iterator ListSizeEnd;
    // Iterator to the end of the components storage.
    MappableExprComponentListRef::const_iterator End;
  public:
    /// Construct an iterator that scans all lists.
    explicit const_component_lists_iterator(
        ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
        ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components)
        : const_component_lists_iterator::iterator_adaptor_base(
              Components.begin()),
          DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
          ListSizeCur(CumulativeListSizes.begin()),
          ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
      assert(UniqueDecls.size() == DeclsListNum.size() &&
             "Inconsistent number of declarations and list sizes!");
      if (!DeclsListNum.empty())
        RemainingLists = *NumListsCur;
    }
    /// Construct an iterator that scan lists for a given declaration \a
    /// Declaration.
    explicit const_component_lists_iterator(
        const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
        ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components)
        : const_component_lists_iterator(UniqueDecls, DeclsListNum,
                                         CumulativeListSizes, Components) {
      // Look for the desired declaration. While we are looking for it, we
      // update the state so that we know the component where a given list
      // starts.
      for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
        if (*DeclCur == Declaration)
          break;
        assert(*NumListsCur > 0 && "No lists associated with declaration??");
        // Skip the lists associated with the current declaration, but save the
        // last list size that was skipped.
        std::advance(ListSizeCur, *NumListsCur - 1);
        PrevListSize = *ListSizeCur;
        ++ListSizeCur;
      }
      // If we didn't find any declaration, advance the iterator to after the
      // last component and set remaining lists to zero.
      if (ListSizeCur == CumulativeListSizes.end()) {
        this->I = End;
        RemainingLists = 0u;
        return;
      }
      // Set the remaining lists with the total number of lists of the current
      // declaration.
      RemainingLists = *NumListsCur;
      // Adjust the list size end iterator to the end of the relevant range.
      ListSizeEnd = ListSizeCur;
      std::advance(ListSizeEnd, RemainingLists);
      // Given that the list sizes are cumulative, the index of the component
      // that start the list is the size of the previous list.
      std::advance(this->I, PrevListSize);
    }
    // Return the array with the current list. The sizes are cumulative, so the
    // array size is the difference between the current size and previous one.
    std::pair<const ValueDecl *, MappableExprComponentListRef>
    operator*() const {
      assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
      return std::make_pair(
          *DeclCur,
          MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize));
    }
    std::pair<const ValueDecl *, MappableExprComponentListRef>
    operator->() const {
      return **this;
    }
    // Skip the components of the current list.
    const_component_lists_iterator &operator++() {
      assert(ListSizeCur != ListSizeEnd && RemainingLists &&
             "Invalid iterator!");
      // If we don't have more lists just skip all the components. Otherwise,
      // advance the iterator by the number of components in the current list.
      if (std::next(ListSizeCur) == ListSizeEnd) {
        this->I = End;
        RemainingLists = 0;
      } else {
        std::advance(this->I, *ListSizeCur - PrevListSize);
        PrevListSize = *ListSizeCur;
        // We are done with a declaration, move to the next one.
        if (!(--RemainingLists)) {
          ++DeclCur;
          ++NumListsCur;
          RemainingLists = *NumListsCur;
          assert(RemainingLists && "No lists in the following declaration??");
        }
      }
      ++ListSizeCur;
      return *this;
    }
  };
  using const_component_lists_range =
      llvm::iterator_range<const_component_lists_iterator>;
  /// Iterators for all component lists.
  const_component_lists_iterator component_lists_begin() const {
    return const_component_lists_iterator(
        getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
        getComponentsRef());
  }
  // The end iterator is built from empty arrays and an empty component range
  // anchored at the end of the components storage.
  const_component_lists_iterator component_lists_end() const {
    return const_component_lists_iterator(
        ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
        MappableExprComponentListRef(getComponentsRef().end(),
                                     getComponentsRef().end()));
  }
  /// Range over every component list of every declaration in this clause.
  const_component_lists_range component_lists() const {
    return {component_lists_begin(), component_lists_end()};
  }
  /// Iterators for component lists associated with the provided
  /// declaration.
  const_component_lists_iterator
  decl_component_lists_begin(const ValueDecl *VD) const {
    return const_component_lists_iterator(
        VD, getUniqueDeclsRef(), getDeclNumListsRef(),
        getComponentListSizesRef(), getComponentsRef());
  }
  const_component_lists_iterator decl_component_lists_end() const {
    return component_lists_end();
  }
  /// Range over the component lists associated with declaration \a VD only.
  const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
    return {decl_component_lists_begin(VD), decl_component_lists_end()};
  }
  /// Iterators to access all the declarations, number of lists, list sizes, and
  /// components.
  using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator;
  using const_all_decls_range = llvm::iterator_range<const_all_decls_iterator>;
  const_all_decls_range all_decls() const {
    auto A = getUniqueDeclsRef();
    return const_all_decls_range(A.begin(), A.end());
  }
  using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator;
  using const_all_num_lists_range =
      llvm::iterator_range<const_all_num_lists_iterator>;
  const_all_num_lists_range all_num_lists() const {
    auto A = getDeclNumListsRef();
    return const_all_num_lists_range(A.begin(), A.end());
  }
  using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator;
  using const_all_lists_sizes_range =
      llvm::iterator_range<const_all_lists_sizes_iterator>;
  const_all_lists_sizes_range all_lists_sizes() const {
    auto A = getComponentListSizesRef();
    return const_all_lists_sizes_range(A.begin(), A.end());
  }
  using const_all_components_iterator = ArrayRef<MappableComponent>::iterator;
  using const_all_components_range =
      llvm::iterator_range<const_all_components_iterator>;
  const_all_components_range all_components() const {
    auto A = getComponentsRef();
    return const_all_components_range(A.begin(), A.end());
  }
  // Iterators over the user-defined mapper references stored after the
  // clause expressions in the trailing storage.
  using mapperlist_iterator = MutableArrayRef<Expr *>::iterator;
  using mapperlist_const_iterator = ArrayRef<const Expr *>::iterator;
  using mapperlist_range = llvm::iterator_range<mapperlist_iterator>;
  using mapperlist_const_range =
      llvm::iterator_range<mapperlist_const_iterator>;
  mapperlist_iterator mapperlist_begin() { return getUDMapperRefs().begin(); }
  mapperlist_iterator mapperlist_end() { return getUDMapperRefs().end(); }
  mapperlist_const_iterator mapperlist_begin() const {
    return getUDMapperRefs().begin();
  }
  mapperlist_const_iterator mapperlist_end() const {
    return getUDMapperRefs().end();
  }
  mapperlist_range mapperlists() {
    return mapperlist_range(mapperlist_begin(), mapperlist_end());
  }
  mapperlist_const_range mapperlists() const {
    return mapperlist_const_range(mapperlist_begin(), mapperlist_end());
  }
};
/// This represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
                           private llvm::TrailingObjects<
                               OMPMapClause, Expr *, ValueDecl *, unsigned,
                               OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;
  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    // Per-declaration list counts followed by cumulative component list sizes.
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }
public:
  /// Number of allowed map-type-modifiers.
  static constexpr unsigned NumberOfModifiers =
      OMPC_MAP_MODIFIER_last - OMPC_MAP_MODIFIER_unknown - 1;
private:
  /// Map-type-modifiers for the 'map' clause.
  OpenMPMapModifierKind MapTypeModifiers[NumberOfModifiers] = {
      OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
      OMPC_MAP_MODIFIER_unknown};
  /// Location of map-type-modifiers for the 'map' clause.
  SourceLocation MapTypeModifiersLoc[NumberOfModifiers];
  /// Map type for the 'map' clause.
  OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
  /// Is this an implicit map type or not.
  bool MapTypeIsImplicit = false;
  /// Location of the map type.
  SourceLocation MapLoc;
  /// Colon location.
  SourceLocation ColonLoc;
  /// Build a clause for \a NumVars listed expressions, \a
  /// NumUniqueDeclarations declarations, \a NumComponentLists total component
  /// lists, and \a NumComponents total expression components.
  ///
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Locations of map-type-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param MapType Map type.
  /// \param MapTypeIsImplicit Map type is inferred implicitly.
  /// \param MapLoc Location of the map type.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(ArrayRef<OpenMPMapModifierKind> MapModifiers,
                        ArrayRef<SourceLocation> MapModifiersLoc,
                        NestedNameSpecifierLoc MapperQualifierLoc,
                        DeclarationNameInfo MapperIdInfo,
                        OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
                        SourceLocation MapLoc, const OMPVarListLocTy &Locs,
                        const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_map, Locs, Sizes, &MapperQualifierLoc,
                                  &MapperIdInfo),
        MapType(MapType), MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) {
    assert(llvm::array_lengthof(MapTypeModifiers) == MapModifiers.size() &&
           "Unexpected number of map type modifiers.");
    llvm::copy(MapModifiers, std::begin(MapTypeModifiers));
    assert(llvm::array_lengthof(MapTypeModifiersLoc) ==
               MapModifiersLoc.size() &&
           "Unexpected number of map type modifier locations.");
    llvm::copy(MapModifiersLoc, std::begin(MapTypeModifiersLoc));
  }
  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_map, OMPVarListLocTy(), Sizes) {}
  /// Set map-type-modifier for the clause.
  ///
  /// \param I index for map-type-modifier.
  /// \param T map-type-modifier for the clause.
  void setMapTypeModifier(unsigned I, OpenMPMapModifierKind T) {
    assert(I < NumberOfModifiers &&
           "Unexpected index to store map type modifier, exceeds array size.");
    MapTypeModifiers[I] = T;
  }
  /// Set location for the map-type-modifier.
  ///
  /// \param I index for map-type-modifier location.
  /// \param TLoc map-type-modifier location.
  void setMapTypeModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfModifiers &&
           "Index to store map type modifier location exceeds array size.");
    MapTypeModifiersLoc[I] = TLoc;
  }
  /// Set type for the clause.
  ///
  /// \param T Type for the clause.
  void setMapType(OpenMPMapClauseKind T) { MapType = T; }
  /// Set type location.
  ///
  /// \param TLoc Type location.
  void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }
  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Location of map-type-modifiers.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  /// \param Type Map type.
  /// \param TypeIsImplicit Map type is inferred implicitly.
  /// \param TypeLoc Location of the map type.
  static OMPMapClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists,
         ArrayRef<Expr *> UDMapperRefs,
         ArrayRef<OpenMPMapModifierKind> MapModifiers,
         ArrayRef<SourceLocation> MapModifiersLoc,
         NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId,
         OpenMPMapClauseKind Type, bool TypeIsImplicit, SourceLocation TypeLoc);
  /// Creates an empty clause with the place for \a NumVars original
  /// expressions, \a NumUniqueDeclarations declarations, \NumComponentLists
  /// lists, and \a NumComponents expression components.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPMapClause *CreateEmpty(const ASTContext &C,
                                   const OMPMappableExprListSizeTy &Sizes);
  /// Fetches mapping kind for the clause.
  OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }
  /// Is this an implicit map type?
  /// We have to capture 'IsMapTypeImplicit' from the parser for more
  /// informative error messages. It helps distinguish map(r) from
  /// map(tofrom: r), which is important to print more helpful error
  /// messages for some target directives.
  bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; }
  /// Fetches the map-type-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for map-type-modifier.
  OpenMPMapModifierKind getMapTypeModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MapTypeModifiers[Cnt];
  }
  /// Fetches the map-type-modifier location at 'Cnt' index of array of
  /// modifiers' locations.
  ///
  /// \param Cnt index for map-type-modifier location.
  SourceLocation getMapTypeModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MapTypeModifiersLoc[Cnt];
  }
  /// Fetches ArrayRef of map-type-modifiers.
  ArrayRef<OpenMPMapModifierKind> getMapTypeModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiers);
  }
  /// Fetches ArrayRef of location of map-type-modifiers.
  ArrayRef<SourceLocation> getMapTypeModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiersLoc);
  }
  /// Fetches location of clause mapping kind.
  SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; }
  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }
  // The varlist expressions are stored as Expr*; they are exposed to generic
  // AST traversal as Stmt* children.
  child_range children() {
    return child_range(
        reinterpret_cast<Stmt **>(varlist_begin()),
        reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPMapClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }
  // Only variables whose map type copies data to the device ('to' or
  // 'tofrom') are reported as used children; otherwise the range is empty.
  child_range used_children() {
    if (MapType == OMPC_MAP_to || MapType == OMPC_MAP_tofrom)
      return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                         reinterpret_cast<Stmt **>(varlist_end()));
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    auto Children = const_cast<OMPMapClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_map;
  }
};
/// This represents 'num_teams' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'num_teams'
/// with single expression 'n'.
class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// NumTeams number.
  Stmt *NumTeams = nullptr;
  /// Set the NumTeams number.
  ///
  /// \param E NumTeams number.
  void setNumTeams(Expr *E) { NumTeams = E; }
public:
  /// Build 'num_teams' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_num_teams, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), NumTeams(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }
  /// Build an empty clause.
  OMPNumTeamsClause()
      : OMPClause(OMPC_num_teams, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Return NumTeams number.
  Expr *getNumTeams() { return cast<Expr>(NumTeams); }
  /// Return NumTeams number.
  Expr *getNumTeams() const { return cast<Expr>(NumTeams); }
  child_range children() { return child_range(&NumTeams, &NumTeams + 1); }
  const_child_range children() const {
    return const_child_range(&NumTeams, &NumTeams + 1);
  }
  // This clause reports no used children; the expression is still visible
  // through children().
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_teams;
  }
};
/// This represents 'thread_limit' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams thread_limit(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'thread_limit'
/// with single expression 'n'.
class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// ThreadLimit number.
  Stmt *ThreadLimit = nullptr;
  /// Set the ThreadLimit number.
  ///
  /// \param E ThreadLimit number.
  void setThreadLimit(Expr *E) { ThreadLimit = E; }
public:
  /// Build 'thread_limit' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPThreadLimitClause(Expr *E, Stmt *HelperE,
                       OpenMPDirectiveKind CaptureRegion,
                       SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc)
      : OMPClause(OMPC_thread_limit, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }
  /// Build an empty clause.
  OMPThreadLimitClause()
      : OMPClause(OMPC_thread_limit, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Return ThreadLimit number.
  Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); }
  /// Return ThreadLimit number.
  Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); }
  child_range children() { return child_range(&ThreadLimit, &ThreadLimit + 1); }
  const_child_range children() const {
    return const_child_range(&ThreadLimit, &ThreadLimit + 1);
  }
  // This clause reports no used children; the expression is still visible
  // through children().
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_thread_limit;
  }
};
/// This represents 'priority' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task priority(n)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'priority' with
/// single expression 'n'.
class OMPPriorityClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// Priority number.
  Stmt *Priority = nullptr;
  /// Set the Priority number.
  ///
  /// \param E Priority number.
  void setPriority(Expr *E) { Priority = E; }
public:
  /// Build 'priority' clause.
  ///
  /// \param Priority Expression associated with this clause.
  /// \param HelperPriority Helper priority for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPPriorityClause(Expr *Priority, Stmt *HelperPriority,
                    OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_priority, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Priority(Priority) {
    setPreInitStmt(HelperPriority, CaptureRegion);
  }
  /// Build an empty clause.
  OMPPriorityClause()
      : OMPClause(OMPC_priority, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Return Priority number.
  Expr *getPriority() { return cast<Expr>(Priority); }
  /// Return Priority number.
  Expr *getPriority() const { return cast<Expr>(Priority); }
  child_range children() { return child_range(&Priority, &Priority + 1); }
  const_child_range children() const {
    return const_child_range(&Priority, &Priority + 1);
  }
  // Unlike most clauses, the non-const used_children() is defined
  // out-of-line.
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPPriorityClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_priority;
  }
};
/// This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
class OMPGrainsizeClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// Grainsize expression of the clause.
  Stmt *Grainsize = nullptr;
  /// Set the grainsize expression.
  void setGrainsize(Expr *Size) { Grainsize = Size; }
public:
  /// Build 'grainsize' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param HelperSize Helper grainsize for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPGrainsizeClause(Expr *Size, Stmt *HelperSize,
                     OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                     SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_grainsize, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Grainsize(Size) {
    setPreInitStmt(HelperSize, CaptureRegion);
  }
  /// Build an empty clause.
  explicit OMPGrainsizeClause()
      : OMPClause(OMPC_grainsize, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Return the grainsize expression, or null for an empty clause.
  Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); }
  child_range children() { return child_range(&Grainsize, &Grainsize + 1); }
  const_child_range children() const {
    return const_child_range(&Grainsize, &Grainsize + 1);
  }
  // Unlike most clauses, the non-const used_children() is defined
  // out-of-line.
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPGrainsizeClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_grainsize;
  }
};
/// This represents 'nogroup' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp taskloop nogroup
/// \endcode
/// In this example directive '#pragma omp taskloop' has 'nogroup' clause.
class OMPNogroupClause : public OMPClause {
public:
  /// Build 'nogroup' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_nogroup, StartLoc, EndLoc) {}

  /// Build an empty clause (for deserialization).
  OMPNogroupClause()
      : OMPClause(OMPC_nogroup, SourceLocation(), SourceLocation()) {}

  // 'nogroup' carries no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_nogroup;
  }
};
/// This represents 'num_tasks' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop num_tasks(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'num_tasks'
/// with single expression '4'.
class OMPNumTasksClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of tasks to generate for the taskloop construct.
  Stmt *NumTasks = nullptr;

  /// Set num_tasks expression (used by OMPClauseReader during
  /// deserialization).
  void setNumTasks(Expr *Size) { NumTasks = Size; }

public:
  /// Build 'num_tasks' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param HelperSize Helper num_tasks expression for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTasksClause(Expr *Size, Stmt *HelperSize,
                    OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_num_tasks, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), NumTasks(Size) {
    setPreInitStmt(HelperSize, CaptureRegion);
  }

  /// Build an empty clause (for deserialization).
  explicit OMPNumTasksClause()
      : OMPClause(OMPC_num_tasks, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the number-of-tasks expression (null for an empty clause).
  Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }

  child_range children() { return child_range(&NumTasks, &NumTasks + 1); }

  const_child_range children() const {
    return const_child_range(&NumTasks, &NumTasks + 1);
  }

  /// Defined out-of-line (no body here); the const overload below forwards to
  /// it.
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPNumTasksClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_tasks;
  }
};
/// This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
class OMPHintClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Hint expression of the 'hint' clause.
  Stmt *Hint = nullptr;

  /// Set hint expression (used by OMPClauseReader during deserialization).
  void setHint(Expr *H) { Hint = H; }

public:
  /// Build 'hint' clause with expression \a Hint.
  ///
  /// \param Hint Hint expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc)
      : OMPClause(OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Hint(Hint) {}

  /// Build an empty clause (for deserialization).
  OMPHintClause() : OMPClause(OMPC_hint, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns the hint expression (null for an empty clause).
  Expr *getHint() const { return cast_or_null<Expr>(Hint); }

  child_range children() { return child_range(&Hint, &Hint + 1); }

  const_child_range children() const {
    return const_child_range(&Hint, &Hint + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_hint;
  }
};
/// This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp distribute dist_schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp distribute' has 'dist_schedule'
/// clause with arguments 'static' and '3'.
class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'dist_schedule' clause.
  OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown;

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any) between the kind and the chunk size.
  SourceLocation CommaLoc;

  /// Chunk size expression (may be null when no chunk is specified).
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'dist_schedule' clause with schedule kind \a Kind and chunk
  /// size expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind DistSchedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation KLoc, SourceLocation CommaLoc,
                        SourceLocation EndLoc,
                        OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
                        Stmt *HelperChunkSize)
      : OMPClause(OMPC_dist_schedule, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
        KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
    setPreInitStmt(HelperChunkSize);
  }

  /// Build an empty clause (for deserialization).
  explicit OMPDistScheduleClause()
      : OMPClause(OMPC_dist_schedule, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Get kind of the clause.
  OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDistScheduleKindLoc() { return KindLoc; }

  /// Get location of ','.
  SourceLocation getCommaLoc() { return CommaLoc; }

  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }

  /// Get chunk size.
  const Expr *getChunkSize() const { return ChunkSize; }

  // The only child is the chunk-size expression (which may be null).
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDistScheduleClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_dist_schedule;
  }
};
/// This represents 'defaultmap' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp target defaultmap(tofrom: scalar)
/// \endcode
/// In this example directive '#pragma omp target' has 'defaultmap' clause of kind
/// 'scalar' with modifier 'tofrom'.
class OMPDefaultmapClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Modifier for 'defaultmap' clause (e.g. 'tofrom').
  OpenMPDefaultmapClauseModifier Modifier = OMPC_DEFAULTMAP_MODIFIER_unknown;

  /// Location of the modifier.
  SourceLocation ModifierLoc;

  /// A kind of the 'defaultmap' clause (e.g. 'scalar').
  OpenMPDefaultmapClauseKind Kind = OMPC_DEFAULTMAP_unknown;

  /// Start location of the defaultmap kind in source code.
  SourceLocation KindLoc;

  /// Set defaultmap kind.
  ///
  /// \param K Defaultmap kind.
  void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }

  /// Set the defaultmap modifier.
  ///
  /// \param M Defaultmap modifier.
  void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
    Modifier = M;
  }

  /// Set location of the defaultmap modifier.
  void setDefaultmapModifierLoc(SourceLocation Loc) {
    ModifierLoc = Loc;
  }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set defaultmap kind start location.
  ///
  /// \param KLoc Defaultmap kind location.
  void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

public:
  /// Build 'defaultmap' clause with defaultmap kind \a Kind
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param MLoc Location of the modifier.
  /// \param KLoc Starting location of the argument.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Defaultmap kind.
  /// \param M The modifier applied to 'defaultmap' clause.
  OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation MLoc, SourceLocation KLoc,
                      SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
                      OpenMPDefaultmapClauseModifier M)
      : OMPClause(OMPC_defaultmap, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Modifier(M), ModifierLoc(MLoc), Kind(Kind), KindLoc(KLoc) {}

  /// Build an empty clause (for deserialization).
  explicit OMPDefaultmapClause()
      : OMPClause(OMPC_defaultmap, SourceLocation(), SourceLocation()) {}

  /// Get kind of the clause.
  OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }

  /// Get the modifier of the clause.
  OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
    return Modifier;
  }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDefaultmapKindLoc() { return KindLoc; }

  /// Get the modifier location.
  SourceLocation getDefaultmapModifierLoc() const {
    return ModifierLoc;
  }

  // 'defaultmap' carries no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_defaultmap;
  }
};
/// This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
                          private llvm::TrailingObjects<
                              OMPToClause, Expr *, ValueDecl *, unsigned,
                              OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(NestedNameSpecifierLoc MapperQualifierLoc,
                       DeclarationNameInfo MapperIdInfo,
                       const OMPVarListLocTy &Locs,
                       const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_to, Locs, Sizes, &MapperQualifierLoc,
                                  &MapperIdInfo) {}

  /// Build an empty clause (for deserialization).
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_to, OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }

  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }

  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    // One entry per declaration plus one per component list.
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPToClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs,
                             ArrayRef<Expr *> Vars,
                             ArrayRef<ValueDecl *> Declarations,
                             MappableExprComponentListsRef ComponentLists,
                             ArrayRef<Expr *> UDMapperRefs,
                             NestedNameSpecifierLoc UDMQualifierLoc,
                             DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPToClause *CreateEmpty(const ASTContext &C,
                                  const OMPMappableExprListSizeTy &Sizes);

  // Children are the listed variable expressions, stored as trailing objects.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPToClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_to;
  }
};
/// This represents clause 'from' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update from(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'from'
/// with the variables 'a' and 'b'.
class OMPFromClause final
    : public OMPMappableExprListClause<OMPFromClause>,
      private llvm::TrailingObjects<
          OMPFromClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(NestedNameSpecifierLoc MapperQualifierLoc,
                         DeclarationNameInfo MapperIdInfo,
                         const OMPVarListLocTy &Locs,
                         const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_from, Locs, Sizes, &MapperQualifierLoc,
                                  &MapperIdInfo) {}

  /// Build an empty clause (for deserialization).
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_from, OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }

  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }

  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    // One entry per declaration plus one per component list.
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPFromClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs,
                               ArrayRef<Expr *> Vars,
                               ArrayRef<ValueDecl *> Declarations,
                               MappableExprComponentListsRef ComponentLists,
                               ArrayRef<Expr *> UDMapperRefs,
                               NestedNameSpecifierLoc UDMQualifierLoc,
                               DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPFromClause *CreateEmpty(const ASTContext &C,
                                    const OMPMappableExprListSizeTy &Sizes);

  // Children are the listed variable expressions, stored as trailing objects.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFromClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_from;
  }
};
/// This represents clause 'use_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_ptr' with the variables 'a' and 'b'.
class OMPUseDevicePtrClause final
    : public OMPMappableExprListClause<OMPUseDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPVarListLocTy &Locs,
                                 const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_use_device_ptr, Locs, Sizes) {}

  /// Build an empty clause (for deserialization).
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_use_device_ptr, OMPVarListLocTy(),
                                  Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // Trailing Expr * storage holds, in order: the varlist, the private
    // copies, and the initializers — varlist_size() entries each.
    return 3 * varlist_size();
  }

  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }

  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    // One entry per declaration plus one per component list.
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// Sets the list of references to private copies with initializers for new
  /// private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for new
  /// private variables. They are stored immediately after the varlist in the
  /// trailing Expr * storage.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new private
  /// variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new private
  /// variables. They are stored immediately after the private copies.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param PrivateVars Expressions referring to private copies.
  /// \param Inits Expressions referring to private copy initializers.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPUseDevicePtrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars,
         ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPUseDevicePtrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  /// Iterator range over the private copies of the listed variables.
  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  /// Iterator range over the initializers of the private copies.
  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }

  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  // Children are the listed variable expressions, stored as trailing objects.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPUseDevicePtrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_use_device_ptr;
  }
};
/// This represents clause 'is_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target is_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause
/// 'is_device_ptr' with the variables 'a' and 'b'.
class OMPIsDevicePtrClause final
: public OMPMappableExprListClause<OMPIsDevicePtrClause>,
private llvm::TrailingObjects<
OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a NumVars.
///
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPIsDevicePtrClause(const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(OMPC_is_device_ptr, Locs, Sizes) {}
/// Build an empty clause.
///
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPIsDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(OMPC_is_device_ptr, OMPVarListLocTy(),
Sizes) {}
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
public:
/// Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
static OMPIsDevicePtrClause *
Create(const ASTContext &C, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
static OMPIsDevicePtrClause *
CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
// Const view over the same children as the non-const overload.
const_child_range children() const {
  auto Children = const_cast<OMPIsDevicePtrClause *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}
// No children are reported as "used" — returns an empty range.
child_range used_children() {
  return child_range(child_iterator(), child_iterator());
}
// Empty range, matching the non-const overload.
const_child_range used_children() const {
  return const_child_range(const_child_iterator(), const_child_iterator());
}
// LLVM-style RTTI support: matches only 'is_device_ptr' clauses.
static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_is_device_ptr;
}
};
/// This represents clause 'nontemporal' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp simd nontemporal(a)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'nontemporal' for
/// the variable 'a'.
class OMPNontemporalClause final
    : public OMPVarListClause<OMPNontemporalClause>,
      private llvm::TrailingObjects<OMPNontemporalClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPNontemporalClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPNontemporalClause>(OMPC_nontemporal, StartLoc,
                                               LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPNontemporalClause(unsigned N)
      : OMPVarListClause<OMPNontemporalClause>(
            OMPC_nontemporal, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// Get the list of privatized copies if the member expression was captured
  /// by one of the privatization clauses.
  /// The copies live in the trailing storage directly after the variable
  /// list itself, hence varlist_end() as the base pointer.
  MutableArrayRef<Expr *> getPrivateRefs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateRefs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPNontemporalClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPNontemporalClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Sets the list of references to private copies created in private clauses.
  /// \param VL List of references.
  void setPrivateRefs(ArrayRef<Expr *> VL);

  // Children are the listed variable expressions.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPNontemporalClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // References to the private copies associated with the listed variables.
  child_range private_refs() {
    return child_range(reinterpret_cast<Stmt **>(getPrivateRefs().begin()),
                       reinterpret_cast<Stmt **>(getPrivateRefs().end()));
  }
  const_child_range private_refs() const {
    auto Children = const_cast<OMPNontemporalClause *>(this)->private_refs();
    return const_child_range(Children.begin(), Children.end());
  }

  // No "used" children — returns an empty range.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  // LLVM-style RTTI support.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_nontemporal;
  }
};
/// This represents 'order' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp simd order(concurrent)
/// \endcode
/// In this example directive '#pragma omp simd' has simple 'order'
/// clause with kind 'concurrent'.
class OMPOrderClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// The kind of the 'order' clause (e.g. 'concurrent').
  OpenMPOrderClauseKind Kind = OMPC_ORDER_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Argument of clause.
  void setKind(OpenMPOrderClauseKind K) { Kind = K; }

  /// Set argument location.
  ///
  /// \param KLoc Argument location.
  void setKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'order' clause with argument \p A ('concurrent').
  ///
  /// \param A Argument of the clause ('concurrent').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPOrderClause(OpenMPOrderClauseKind A, SourceLocation ALoc,
                 SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc)
      : OMPClause(OMPC_order, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A),
        KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPOrderClause()
      : OMPClause(OMPC_order, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPOrderClauseKind getKind() const { return Kind; }

  /// Returns location of clause kind.
  SourceLocation getKindKwLoc() const { return KindKwLoc; }

  // The clause carries no child expressions; all ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  // LLVM-style RTTI support.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_order;
  }
};
/// This class implements a simple visitor for OMPClause
/// subclasses.
template<class ImplClass, template <typename> class Ptr, typename RetTy>
class OMPClauseVisitorBase {
public:
// PTR(CLASS) is the (possibly const-qualified) pointer type used for CLASS.
#define PTR(CLASS) Ptr<CLASS>
// Forward to the derived visitor's handler for CLASS (CRTP dispatch).
#define DISPATCH(CLASS) \
  return static_cast<ImplClass*>(this)->Visit##CLASS(static_cast<PTR(CLASS)>(S))

  // Generate one Visit##Class method per OpenMP clause; each defaults to
  // dispatching into the derived implementation.
#define OPENMP_CLAUSE(Name, Class) \
  RetTy Visit ## Class (PTR(Class) S) { DISPATCH(Class); }
#include "clang/Basic/OpenMPKinds.def"

  /// Dispatch on the dynamic clause kind of \p S.
  RetTy Visit(PTR(OMPClause) S) {
    // Top switch clause: visit each OMPClause.
    switch (S->getClauseKind()) {
    default: llvm_unreachable("Unknown clause kind!");
#define OPENMP_CLAUSE(Name, Class) \
  case OMPC_ ## Name : return Visit ## Class(static_cast<PTR(Class)>(S));
#include "clang/Basic/OpenMPKinds.def"
    }
  }
  // Base case, ignore it. :)
  RetTy VisitOMPClause(PTR(OMPClause) Node) { return RetTy(); }
#undef PTR
#undef DISPATCH
};
/// Pointer-to-const alias used by the const visitor below.
template <typename T> using const_ptr = std::add_pointer_t<std::add_const_t<T>>;

/// Visitor over mutable clauses (plain pointers).
template <class ImplClass, typename RetTy = void>
class OMPClauseVisitor
    : public OMPClauseVisitorBase<ImplClass, std::add_pointer_t, RetTy> {};

/// Visitor over const clauses (pointers-to-const).
template<class ImplClass, typename RetTy = void>
class ConstOMPClauseVisitor :
  public OMPClauseVisitorBase <ImplClass, const_ptr, RetTy> {};
/// Visitor that prints OpenMP clauses to a raw_ostream, honoring the given
/// printing policy.
class OMPClausePrinter final : public OMPClauseVisitor<OMPClausePrinter> {
  raw_ostream &OS;
  const PrintingPolicy &Policy;

  /// Process clauses with list of variables.
  template <typename T> void VisitOMPClauseList(T *Node, char StartSym);

public:
  OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy)
      : OS(OS), Policy(Policy) {}

  // One Visit method per clause kind; implementations live elsewhere.
#define OPENMP_CLAUSE(Name, Class) void Visit##Class(Class *S);
#include "clang/Basic/OpenMPKinds.def"
};
/// Helper data structure representing the traits in a match clause of an
/// `declare variant` or `metadirective`. The outer level is an ordered
/// collection of selector sets, each with an associated kind and an ordered
/// collection of selectors. A selector has a kind, an optional score/condition,
/// and an ordered collection of properties.
struct OMPTraitInfo {
  /// A single property of a trait selector.
  struct OMPTraitProperty {
    llvm::omp::TraitProperty Kind = llvm::omp::TraitProperty::invalid;
  };
  /// A trait selector: its kind, an optional score/condition expression,
  /// and an ordered collection of properties.
  struct OMPTraitSelector {
    Expr *ScoreOrCondition = nullptr;
    llvm::omp::TraitSelector Kind = llvm::omp::TraitSelector::invalid;
    llvm::SmallVector<OMPTraitProperty, 4> Properties;
  };
  /// A selector set: its kind and an ordered collection of selectors.
  struct OMPTraitSet {
    llvm::omp::TraitSet Kind = llvm::omp::TraitSet::invalid;
    llvm::SmallVector<OMPTraitSelector, 4> Selectors;
  };

  /// The outermost level of selector sets.
  llvm::SmallVector<OMPTraitSet, 4> Sets;

  /// Invoke \p Cond on every selector's score/condition expression; returns
  /// true as soon as \p Cond returns true for any of them. The bool passed to
  /// \p Cond is true for scores and false for user conditions.
  bool anyScoreOrCondition(
      const llvm::function_ref<bool(Expr *&, bool /* IsScore */)> &Cond) {
    return llvm::any_of(Sets, [&Cond](OMPTraitInfo::OMPTraitSet &Set) {
      return llvm::any_of(
          Set.Selectors, [&Cond](OMPTraitInfo::OMPTraitSelector &Selector) {
            return Cond(Selector.ScoreOrCondition,
                        /* IsScore */ Selector.Kind !=
                            llvm::omp::TraitSelector::user_condition);
          });
    });
  }

  /// Create a variant match info object from this trait info object. While the
  /// former is a flat representation the actual main difference is that the
  /// latter uses clang::Expr to store the score/condition while the former is
  /// independent of clang. Thus, expressions and conditions are evaluated in
  /// this method.
  void getAsVariantMatchInfo(ASTContext &ASTCtx,
                             llvm::omp::VariantMatchInfo &VMI) const;

  /// Print a human readable representation into \p OS.
  void print(llvm::raw_ostream &OS, const PrintingPolicy &Policy) const;
};
llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo &TI);
} // namespace clang
#endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
|
GB_binop__div_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__div_fp32
// A.*B function (eWiseMult): GB_AemultB__div_fp32
// A*D function (colscale): GB_AxD__div_fp32
// D*A function (rowscale): GB_DxB__div_fp32
// C+=B function (dense accum): GB_Cdense_accumB__div_fp32
// C+=b function (dense accum): GB_Cdense_accumb__div_fp32
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__div_fp32
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__div_fp32
// C=scalar+B GB_bind1st__div_fp32
// C=scalar+B' GB_bind1st_tran__div_fp32
// C=A+scalar GB_bind2nd__div_fp32
// C=A'+scalar GB_bind2nd_tran__div_fp32
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = (aij / bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x / y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_FP32 || GxB_NO_DIV_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; the binary op serves as both
// the ewise op and the accumulator.
void GB_Cdense_ewise3_accum__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // NOTE(review): unlike the other kernels here, there is no GB_DISABLE
    // guard — presumably the caller checks before dispatching; confirm.
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense.
GrB_Info GB_Cdense_ewise3_noaccum__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
GrB_Info GB_Cdense_accumB__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,   // per-task slicing of B
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
GrB_Info GB_Cdense_accumb__div_fp32
(
    GrB_Matrix C,
    const GB_void *p_bwork,    // untyped pointer to the scalar b
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    // single exit point; the original had a second, unreachable return
    // inside the braces above
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB_AxD__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,   // per-task slicing of A
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed pointer to C's values, written by the colscale template
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB_DxB__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed pointer to C's values, written by the rowscale template
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B, or C<M> = A+B with an optional (possibly complemented,
// possibly structural) mask M.
GrB_Info GB_AaddB__div_fp32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // ek_slice workspaces, allocated (if needed) by the template and
    // released by GB_FREE_ALL below
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, or C<M> = A.*B with an optional (possibly
// complemented, possibly structural) mask M.
GrB_Info GB_AemultB__div_fp32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // ek_slice workspaces, allocated (if needed) by the template and
    // released by GB_FREE_ALL below
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x / Bx [p] for each entry p present in B (bind the scalar as the
// first operand of the binary op).
GrB_Info GB_bind1st__div_fp32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arrays and the bound scalar
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only positions flagged present by Bb are computed
        if (GBB (Bb, p))
        {
            Cx [p] = (x / Bx [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] / y for each entry p present in A (bind the scalar as the
// second operand of the binary op).
GrB_Info GB_bind2nd__div_fp32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arrays and the bound scalar
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only positions flagged present by Ab are computed
        if (GBB (Ab, p))
        {
            Cx [p] = (Ax [p] / y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (x / aij) ; \
}
// C = op (x, A'): transpose A while applying cij = x / aij (via GB_CAST_OP).
GrB_Info GB_bind1st_tran__div_fp32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    // Temporarily point GB_ATYPE at the y-operand type (also float here),
    // then restore the original definition after the template expands.
    #undef GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the bound scalar x, first operand of the op
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef GB_ATYPE
    #define GB_ATYPE \
        float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (aij / y) ; \
}
// C = op (A', y): transpose A while applying cij = aij / y (via GB_CAST_OP).
GrB_Info GB_bind2nd_tran__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the bound scalar y, second operand of the op
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GraftInteriorForestsSetTransferIterationWorklet.h | //============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_tree_grafter_graft_interior_forests_set_trandfer_iteration_worklet_h
#define vtk_m_worklet_contourtree_distributed_tree_grafter_graft_interior_forests_set_trandfer_iteration_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace tree_grafter
{
/// Now set the transfer iteration for all attachment points
/// If there were no supernodes to transfer, their types are all NO_SUCH_ELEMENT
class GraftInteriorForestsSetTransferIterationWorklet : public vtkm::worklet::WorkletMapField
{
public:
  // NOTE: supernodeType is sized to ContourTree.Supernodes.GetNumberOfValues() so we can use it for our iteration
  // NOTE: for whenTransferred we need FieldInOut type to avoid overwrite of existing value as not all values will be updated
  using ControlSignature = void(FieldIn supernodeType,     // input
                                FieldIn hierarchicalSuperId, // input
                                FieldInOut whenTransferred // input/output: only attachment points updated
  );
  using ExecutionSignature = void(_1, _2, _3);
  using InputDomain = _1;

  /// Constructor.
  /// \param numTransferIterations the iteration number recorded for each
  ///        attachment point that is transferred.
  VTKM_EXEC_CONT
  GraftInteriorForestsSetTransferIterationWorklet(const vtkm::Id& numTransferIterations)
    : NumTransferIterations(numTransferIterations)
  {
  }

  /// Per-supernode operator: sets whenTransferred for attachment points that
  /// are not yet supernodes of the hierarchical tree; all other entries are
  /// left unchanged.
  VTKM_EXEC void operator()(const vtkm::Id& supernodeType,
                            const vtkm::Id& hierarchicalSuperId,
                            vtkm::Id& whenTransferred) const
  { // operator ()
    if ((supernodeType == vtkm::worklet::contourtree_augmented::IS_ATTACHMENT) &&
        vtkm::worklet::contourtree_augmented::NoSuchElement(hierarchicalSuperId))
    { // not a supernode in the hierarchical tree yet
      // record the transfer iteration, flagged as a supernode
      whenTransferred =
        (this->NumTransferIterations | vtkm::worklet::contourtree_augmented::IS_SUPERNODE);
    } // not a supernode in the hierarchical tree yet

    // In serial this worklet implements the following operation
    /*
      // Now set the transfer iteration for all attachment points
      // If there were no supernodes to transfer, their types are all NO_SUCH_ELEMENT
      #pragma omp parallel for
      for (indexType supernode = 0; supernode < contourTree->supernodes.size(); supernode++)
        { // per supernode
          // std::cout << "Supernode " << supernode << std::endl;
          if ((supernodeType[supernode] == IS_ATTACHMENT) && noSuchElement(hierarchicalSuperID[supernode]))
            { // not a supernode in the hierarchical tree yet
              whenTransferred[supernode] = nTransferIterations | IS_SUPERNODE;
            } // not a supernode in the hierarchical tree yet
        } // per supernode
    */
  } // operator ()

private:
  // iteration number stamped into transferred attachment points
  vtkm::Id NumTransferIterations;
}; // GraftInteriorForestsSetTransferIterationWorklet
} // namespace tree_grafter
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
|
IF97_Region5.c | // Copyright Martin Lord 2014-2014.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// IAPWS-IF97 Region 5: High Temperature Region equations
/* *********************************************************************
* ******* VALIDITY ************
*
* Region 5 of IAPWS-IF97 is valid for the following temperature and
* pressure range:
* 1073.15 K <= T <= 2273.15 K
* 0 < p <= 50 MPa.
*
* It is only valid for pure undissociated water; any dissociation will
* have to be taken into account separately.
*
*
* ****************************************************************** */
/* ********************************************************************
* COMPILE AND LINK INSTRUCTIONS (gcc) *
*
* This library uses math.h, so must have the -lm link flag
*
* The library is programmed to be able to use OpenMP multithreading
* use the -fopenmp complie flag to enable multithreadded code
*
* ****************************************************************** */
#include "IF97_common.h" //PSTAR TSTAR & sqr
#include "IF97_Region5.h"
#include <math.h> // for pow, log
/* #ifdef _OPENMP // multithreading via libgomp
# include <omp.h>
#endif
*/
// #include <stdio.h> used for debugging only
//***************************************************************
//****** REGION 5 *******************************
// See Table 37
const typIF97Coeffs_Jn GIBBS_COEFFS_R5_O[] = {
{0, 0.0} // not used
,{0, -0.13179983674201E2} // 1
,{1, 0.68540841634434E1}
,{-3, -0.24805148933466E-1}
,{-2, 0.36901534980333}
,{-1, -0.31161318213925E1}
,{2, -0.32961626538917} //6
};
const int MAX_GIBBS_COEFFS_R5_O = 6;
// ideal gas part of dimensionless gibbs free energy in Region5 : See Equation 33
// Ideal-gas part of the dimensionless Gibbs free energy in Region 5.
// gamma_o = ln(pi) + sum_i n_i * tau^J_i   (IF97 Equation 33)
double if97_r5_Gamma_o (double if97_pi, double if97_tau) {
    double tauTerms = 0.0;
    int i;

    #pragma omp parallel for reduction(+:tauTerms)  // multithreaded sum
    for (i = 1; i <= MAX_GIBBS_COEFFS_R5_O; i++) {
        tauTerms += GIBBS_COEFFS_R5_O[i].ni * pow(if97_tau, GIBBS_COEFFS_R5_O[i].Ji);
    }

    return log(if97_pi) + tauTerms;
}
// See Table 38
const typIF97Coeffs_IJn GIBBS_COEFFS_R5_R[] = {
{0, 0, 0.0} // not used
,{1, 1, 0.15736404855259E-2} //1
,{1, 2, 0.90153761673944E-3}
,{1, 3, -0.50270077677648E-2}
,{2, 3, 0.22440037409485E-5}
,{2, 9, -0.41163275453471E-5}
,{3, 7, 0.37919454822955E-7} //6
};
const int MAX_GIBBS_COEFFS_R5_R = 6;
// residual part of dimensionless gibbs free energy in Region5 : See Equation 34
// Residual part of the dimensionless Gibbs free energy in Region 5.
// gamma_r = sum_i n_i * pi^I_i * tau^J_i   (IF97 Equation 34)
double if97_r5_Gamma_r (double if97_pi, double if97_tau) {
    double acc = 0.0;
    int i;

    #pragma omp parallel for reduction(+:acc)  // multithreaded sum
    for (i = 1; i <= MAX_GIBBS_COEFFS_R5_R; i++) {
        acc += GIBBS_COEFFS_R5_R[i].ni
             * pow(if97_pi,  GIBBS_COEFFS_R5_R[i].Ii)
             * pow(if97_tau, GIBBS_COEFFS_R5_R[i].Ji);
    }

    return acc;
}
/* dimensionless gibbs free energy in Region 5 = g/RT:
* The fundamental equation of region 2. See Equation 32 */
// Dimensionless Gibbs free energy g/RT in Region 5 (IF97 Equation 32):
// the sum of the ideal-gas and residual parts.
double if97_r5_Gamma (double if97_pi, double if97_tau) {
    const double ideal    = if97_r5_Gamma_o (if97_pi, if97_tau);
    const double residual = if97_r5_Gamma_r (if97_pi, if97_tau);
    return ideal + residual;
}
// [d gamma_o / d pi] keeping tau constant
// [d gamma_o / d pi] at constant tau.
// Since gamma_o = ln(pi) + f(tau), the derivative is simply 1/pi.
double if97_r5_GammaPi_o (double if97_pi) {
    return 1.0 / if97_pi;
}
// [d squared gamma_o / d pi squared] keeping tau constant
// [d squared gamma_o / d pi squared] at constant tau.
// gamma_o = ln(pi) + f(tau), so the second derivative is -1/pi^2.
double if97_r5_GammaPiPi_o (double if97_pi) {
    return -1.0 / sqr (if97_pi);
}
// [d gamma_o / d tau] keeping pi constant
// Checked OK
// [d gamma_o / d tau] at constant pi:
// sum_i n_i * J_i * tau^(J_i - 1). Checked OK.
double if97_r5_GammaTau_o (double if97_tau) {
    double acc = 0.0;
    int i;

    #pragma omp parallel for reduction(+:acc)  // multithreaded sum
    for (i = 1; i <= MAX_GIBBS_COEFFS_R5_O; i++) {
        acc += GIBBS_COEFFS_R5_O[i].ni * GIBBS_COEFFS_R5_O[i].Ji
             * pow(if97_tau, GIBBS_COEFFS_R5_O[i].Ji - 1.0);
    }

    return acc;
}
// [d squared gamma_o / d tau squared] keeping pi constant
// [d squared gamma_o / d tau squared] at constant pi:
// sum_i n_i * J_i * (J_i - 1) * tau^(J_i - 2).
double if97_r5_GammaTauTau_o (double if97_tau) {
    double acc = 0.0;
    int i;

    #pragma omp parallel for reduction(+:acc)  // multithreaded sum
    for (i = 1; i <= MAX_GIBBS_COEFFS_R5_O; i++) {
        acc += GIBBS_COEFFS_R5_O[i].ni * GIBBS_COEFFS_R5_O[i].Ji
             * (GIBBS_COEFFS_R5_O[i].Ji - 1.0)
             * pow(if97_tau, GIBBS_COEFFS_R5_O[i].Ji - 2.0);
    }

    return acc;
}
// [d squared gamma_o / d pi d tau]
const double if97_r5_GammaPiTau_o = 0.0;
// [d gamma_r / d pi] keeping tau constant
// [d gamma_r / d pi] at constant tau:
// sum_i n_i * I_i * pi^(I_i - 1) * tau^J_i.
double if97_r5_GammaPi_r (double if97_pi, double if97_tau) {
    double acc = 0.0;
    int i;

    #pragma omp parallel for reduction(+:acc)  // multithreaded sum
    for (i = 1; i <= MAX_GIBBS_COEFFS_R5_R; i++) {
        acc += GIBBS_COEFFS_R5_R[i].ni * GIBBS_COEFFS_R5_R[i].Ii
             * pow(if97_pi,  GIBBS_COEFFS_R5_R[i].Ii - 1.0)
             * pow(if97_tau, GIBBS_COEFFS_R5_R[i].Ji);
    }

    return acc;
}
// [d squared gamma_r / d pi squared] keeping tau constant
// [d squared gamma_r / d pi squared] at constant tau:
// sum_i n_i * I_i * (I_i - 1) * pi^(I_i - 2) * tau^J_i.
double if97_r5_GammaPiPi_r (double if97_pi, double if97_tau) {
    double acc = 0.0;
    int i;

    #pragma omp parallel for reduction(+:acc)  // multithreaded sum
    for (i = 1; i <= MAX_GIBBS_COEFFS_R5_R; i++) {
        acc += GIBBS_COEFFS_R5_R[i].ni * GIBBS_COEFFS_R5_R[i].Ii
             * (GIBBS_COEFFS_R5_R[i].Ii - 1.0)
             * pow(if97_pi,  GIBBS_COEFFS_R5_R[i].Ii - 2.0)
             * pow(if97_tau, GIBBS_COEFFS_R5_R[i].Ji);
    }

    return acc;
}
// [d gamma_r / d tau] keeping pi constant
// [d gamma_r / d tau] at constant pi:
// sum_i n_i * pi^I_i * J_i * tau^(J_i - 1).
double if97_r5_GammaTau_r (double if97_pi, double if97_tau) {
    double acc = 0.0;
    int i;

    #pragma omp parallel for reduction(+:acc)  // multithreaded sum
    for (i = 1; i <= MAX_GIBBS_COEFFS_R5_R; i++) {
        acc += GIBBS_COEFFS_R5_R[i].ni * pow(if97_pi, GIBBS_COEFFS_R5_R[i].Ii)
             * GIBBS_COEFFS_R5_R[i].Ji
             * pow(if97_tau, GIBBS_COEFFS_R5_R[i].Ji - 1.0);
    }

    return acc;
}
// [d squared gamma_r / d tau squared] keeping pi constant
// Second tau-derivative of the residual part of the region-5 dimensionless
// Gibbs free energy.
double if97_r5_GammaTauTau_r (double if97_pi, double if97_tau) {
    int i;
    double dblGammaSum = 0.0;
    // Serial loop: tiny trip count — the former OpenMP reduction pragma was
    // all overhead and made the floating-point summation order nondeterministic.
    for (i = 1; i <= MAX_GIBBS_COEFFS_R5_R; i++) {
        dblGammaSum += GIBBS_COEFFS_R5_R[i].ni * pow( if97_pi, GIBBS_COEFFS_R5_R[i].Ii) * GIBBS_COEFFS_R5_R[i].Ji
            * (GIBBS_COEFFS_R5_R[i].Ji - 1.0) * pow(if97_tau, (GIBBS_COEFFS_R5_R[i].Ji - 2.0));
    }
    return dblGammaSum;
}
// [d squared gamma_r / d pi d tau]
// (Header comment fixed: this is the mixed pi-tau derivative of the residual
// part — the old comment was a copy-paste of the tau-tau derivative above.)
double if97_r5_GammaPiTau_r (double if97_pi, double if97_tau) {
    int i;
    double dblGammaSum = 0.0;
    // Serial loop: tiny trip count — the former OpenMP reduction pragma was
    // all overhead and made the floating-point summation order nondeterministic.
    for (i = 1; i <= MAX_GIBBS_COEFFS_R5_R; i++) {
        dblGammaSum += GIBBS_COEFFS_R5_R[i].ni * GIBBS_COEFFS_R5_R[i].Ii * pow(if97_pi, (GIBBS_COEFFS_R5_R[i].Ii - 1.0))
            * GIBBS_COEFFS_R5_R[i].Ji * pow(if97_tau, ( GIBBS_COEFFS_R5_R[i].Ji - 1.0));
    }
    return dblGammaSum;
}
//**********************************************************
//********* REGION 5 PROPERTY EQUATIONS*********************
// specific Gibbs free energy in region 5 (kJ / kg)
// g(p,T) = R * T * gamma(pi, tau), with pi = p/p* and tau = T*/T
double if97_r5_g (double p_MPa , double t_Kelvin) {
    const double reduced_p = p_MPa / PSTAR_R5;       // pi
    const double inv_reduced_t = TSTAR_R5 / t_Kelvin; // tau
    const double gamma_total = if97_r5_Gamma (reduced_p, inv_reduced_t);
    return IF97_R * t_Kelvin * gamma_total;
}
// specific volume in region 5 (metres cubed per kilogram)
// v = (R*T/p) * pi * (dgamma_o/dpi + dgamma_r/dpi); the constants convert
// R from kJ to J (x1000) and p from MPa to Pa (x1e6) — pure SI internally.
// Checked OK
double if97_r5_v (double p_MPa , double t_Kelvin ){
    const double reduced_p = p_MPa / PSTAR_R5;        // pi
    const double inv_reduced_t = TSTAR_R5 / t_Kelvin; // tau
    const double gamma_pi = if97_r5_GammaPi_o (reduced_p)
                          + if97_r5_GammaPi_r (reduced_p, inv_reduced_t);
    return (IF97_R * 1000 * t_Kelvin / (p_MPa * 1e6)) * reduced_p * gamma_pi;
}
// specific internal energy in region 5 (KJ / Kg)
// u = R*T * ( tau*(dgamma_o/dtau + dgamma_r/dtau)
//           - pi *(dgamma_o/dpi  + dgamma_r/dpi ) )
// Checked OK
double if97_r5_u (double p_MPa , double t_Kelvin ){
    const double reduced_p = p_MPa / PSTAR_R5;        // pi
    const double inv_reduced_t = TSTAR_R5 / t_Kelvin; // tau
    const double tau_part = inv_reduced_t
        * (if97_r5_GammaTau_o (inv_reduced_t) + if97_r5_GammaTau_r (reduced_p, inv_reduced_t));
    const double pi_part = reduced_p
        * (if97_r5_GammaPi_o (reduced_p) + if97_r5_GammaPi_r (reduced_p, inv_reduced_t));
    return (IF97_R * t_Kelvin) * (tau_part - pi_part);
}
// specific entropy in region 5 (KJ / Kg.K)
// s = R * ( tau*(dgamma_o/dtau + dgamma_r/dtau) - gamma )
// Checked OK
double if97_r5_s (double p_MPa , double t_Kelvin ){
    const double reduced_p = p_MPa / PSTAR_R5;        // pi
    const double inv_reduced_t = TSTAR_R5 / t_Kelvin; // tau
    const double tau_term = inv_reduced_t
        * (if97_r5_GammaTau_o (inv_reduced_t) + if97_r5_GammaTau_r (reduced_p, inv_reduced_t));
    return IF97_R * (tau_term - if97_r5_Gamma (reduced_p, inv_reduced_t));
}
// specific enthalpy in region 5 (KJ / Kg)
// h = R * T * tau * (dgamma_o/dtau + dgamma_r/dtau)
// Checked OK
double if97_r5_h (double p_MPa , double t_Kelvin ){
    const double reduced_p = p_MPa / PSTAR_R5;        // pi
    const double inv_reduced_t = TSTAR_R5 / t_Kelvin; // tau
    const double gamma_tau = if97_r5_GammaTau_o (inv_reduced_t)
                           + if97_r5_GammaTau_r (reduced_p, inv_reduced_t);
    return IF97_R * t_Kelvin * inv_reduced_t * gamma_tau;
}
// specific isobaric heat capacity in region 5 (KJ / Kg.K)
// Cp = -R * tau^2 * (d2gamma_o/dtau2 + d2gamma_r/dtau2)
// Checked OK
double if97_r5_Cp (double p_MPa , double t_Kelvin ){
    const double reduced_p = p_MPa / PSTAR_R5;        // pi
    const double inv_reduced_t = TSTAR_R5 / t_Kelvin; // tau
    const double gamma_tautau = if97_r5_GammaTauTau_o (inv_reduced_t)
                              + if97_r5_GammaTauTau_r (reduced_p, inv_reduced_t);
    return -IF97_R * sqr (inv_reduced_t) * gamma_tautau;
}
// specific isochoric heat capacity in region 5 (KJ / Kg.K)
// Cv = R * ( -tau^2*(d2g_o/dt2 + d2g_r/dt2)
//            - (1 + pi*dg_r/dpi - tau*pi*d2g_r/dpidt)^2
//              / (1 - pi^2*d2g_r/dpi2) )
double if97_r5_Cv (double p_MPa , double t_Kelvin ){
    const double reduced_p = p_MPa / PSTAR_R5;        // pi
    const double inv_reduced_t = TSTAR_R5 / t_Kelvin; // tau
    const double gamma_tautau = if97_r5_GammaTauTau_o (inv_reduced_t)
                              + if97_r5_GammaTauTau_r (reduced_p, inv_reduced_t);
    const double numerator = sqr (1.0
        + reduced_p * if97_r5_GammaPi_r (reduced_p, inv_reduced_t)
        - inv_reduced_t * reduced_p * if97_r5_GammaPiTau_r (reduced_p, inv_reduced_t));
    const double denominator = 1.0
        - sqr (reduced_p) * if97_r5_GammaPiPi_r (reduced_p, inv_reduced_t);
    return IF97_R * ((- sqr (inv_reduced_t) * gamma_tautau) - (numerator / denominator));
}
// speed of sound in region 5 (m/s)
// w^2 = R*T * (1 + 2*pi*g_pi_r + pi^2*g_pi_r^2) /
//       ( (1 - pi^2*g_pipi_r)
//         + (1 + pi*g_pi_r - tau*pi*g_pitau_r)^2 / (tau^2*(g_tt_o + g_tt_r)) )
// The factor 1000 converts R from kJ to J so the result is in pure SI.
// checked OK
double if97_r5_w (double p_MPa , double t_Kelvin ){
    const double reduced_p = p_MPa / PSTAR_R5;        // pi
    const double inv_reduced_t = TSTAR_R5 / t_Kelvin; // tau
    const double g_pi_r = if97_r5_GammaPi_r (reduced_p, inv_reduced_t);
    const double numerator = 1.0 + 2.0 * reduced_p * g_pi_r
        + sqr (reduced_p) * sqr (g_pi_r);
    const double term_a = 1.0
        - sqr (reduced_p) * if97_r5_GammaPiPi_r (reduced_p, inv_reduced_t);
    const double term_b = sqr (1.0 + reduced_p * g_pi_r
            - inv_reduced_t * reduced_p * if97_r5_GammaPiTau_r (reduced_p, inv_reduced_t))
        / (sqr (inv_reduced_t) * (if97_r5_GammaTauTau_o (inv_reduced_t)
            + if97_r5_GammaTauTau_r (reduced_p, inv_reduced_t)));
    return sqrt (IF97_R * 1000 * t_Kelvin * (numerator / (term_a + term_b)));
}
|
GB_sort.c | //------------------------------------------------------------------------------
// GB_sort: sort all vectors in a matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB_sort.h"
#include "GB_werk.h"
#include "GB_transpose.h"
#include "GB_ek_slice.h"
// macros:
// GB_SORT (func) defined as GB_sort_func_TYPE_ascend or _descend,
// GB_msort_ISO_ascend or _descend,
// or GB_msort_func_UDT
// GB_TYPE bool, int8_, ... or GB_void for UDT
// GB_ADDR(A,p) A+p for builtin, A + p * GB_SIZE otherwise
// GB_SIZE size of each entry: sizeof (GB_TYPE) for built-in
// GB_GET(x,X,i) x = (op->xtype) X [i]
// GB_COPY(A,i,C,k) A [i] = C [k]
// GB_SWAP(A,i,k) swap A [i] and A [k]
// GB_LT compare two entries, x < y
//------------------------------------------------------------------------------
// macros for all built-in types
//------------------------------------------------------------------------------
// GB_SORT_UDT = 0: the following instantiations are for built-in types,
// with no typecasting (the UDT/typecast variant is enabled further below).
#define GB_SORT_UDT 0
#define GB_ADDR(A,i) ((A) + (i))
#define GB_GET(x,A,i) GB_TYPE x = A [i]
#define GB_COPY(A,i,B,j) A [i] = B [j]
#define GB_SIZE sizeof (GB_TYPE)
#define GB_SWAP(A,i,j) { GB_TYPE t = A [i] ; A [i] = A [j] ; A [j] = t ; }
//------------------------------------------------------------------------------
// ascending sort for built-in types
//------------------------------------------------------------------------------
// GB_LT: a < b, with ties broken by the original index (i < j) so the sort
// is stable.
#define GB_LT(less,a,i,b,j) \
less = (((a) < (b)) ? true : (((a) == (b)) ? ((i) < (j)) : false))
// Each stanza below sets GB_TYPE and the GB_SORT name-mangling macro, then
// instantiates the generic sort via the template file.
// NOTE(review): GB_TYPE and GB_SORT are re-#defined without an intervening
// #undef here — presumably GB_sort_template.c #undefs them at its end;
// confirm against that file.
#define GB_TYPE bool
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_BOOL)
#include "GB_sort_template.c"
#define GB_TYPE int8_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_INT8)
#include "GB_sort_template.c"
#define GB_TYPE int16_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_INT16)
#include "GB_sort_template.c"
#define GB_TYPE int32_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_INT32)
#include "GB_sort_template.c"
#define GB_TYPE int64_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_INT64)
#include "GB_sort_template.c"
#define GB_TYPE uint8_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_UINT8)
#include "GB_sort_template.c"
#define GB_TYPE uint16_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_UINT16)
#include "GB_sort_template.c"
#define GB_TYPE uint32_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_UINT32)
#include "GB_sort_template.c"
#define GB_TYPE uint64_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_UINT64)
#include "GB_sort_template.c"
#define GB_TYPE float
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_FP32)
#include "GB_sort_template.c"
#define GB_TYPE double
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_FP64)
#include "GB_sort_template.c"
//------------------------------------------------------------------------------
// descending sort for built-in types
//------------------------------------------------------------------------------
// Same instantiations, but the comparator is flipped (a > b); ties are still
// broken ascending by index, keeping the sort stable.
#undef GB_LT
#define GB_LT(less,a,i,b,j) \
less = (((a) > (b)) ? true : (((a) == (b)) ? ((i) < (j)) : false))
#define GB_TYPE bool
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_BOOL)
#include "GB_sort_template.c"
#define GB_TYPE int8_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_INT8)
#include "GB_sort_template.c"
#define GB_TYPE int16_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_INT16)
#include "GB_sort_template.c"
#define GB_TYPE int32_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_INT32)
#include "GB_sort_template.c"
#define GB_TYPE int64_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_INT64)
#include "GB_sort_template.c"
#define GB_TYPE uint8_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_UINT8)
#include "GB_sort_template.c"
#define GB_TYPE uint16_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_UINT16)
#include "GB_sort_template.c"
#define GB_TYPE uint32_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_UINT32)
#include "GB_sort_template.c"
#define GB_TYPE uint64_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_UINT64)
#include "GB_sort_template.c"
#define GB_TYPE float
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_FP32)
#include "GB_sort_template.c"
#define GB_TYPE double
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_FP64)
#include "GB_sort_template.c"
//------------------------------------------------------------------------------
// macros for user-defined types and when typecasting is performed
//------------------------------------------------------------------------------
// Generic variant: entries are opaque byte strings of csize bytes, moved with
// memcpy, typecast with fcast, and compared via the user comparator flt.
#undef GB_ADDR
#undef GB_GET
#undef GB_COPY
#undef GB_SIZE
#undef GB_SWAP
#undef GB_LT
#define GB_ADDR(A,i) ((A) + (i) * csize)
#define GB_GET(x,A,i) GB_void x [GB_VLA(xsize)] ; \
fcast (x, GB_ADDR (A, i), csize)
#define GB_COPY(A,i,B,j) memcpy (GB_ADDR (A, i), GB_ADDR (B, j), csize)
#define GB_SIZE csize
#define GB_TYPE GB_void
#define GB_SWAP(A,i,j) \
{ \
GB_void t [GB_VLA(csize)] ; /* declare the scalar t */ \
memcpy (t, GB_ADDR (A, i), csize) ; /* t = A [i] */ \
GB_COPY (A, i, A, j) ; /* A [i] = A [j] */ \
memcpy (GB_ADDR (A, j), t, csize) ; /* A [j] = t */ \
}
#define GB_LT(less,a,i,b,j) \
{ \
flt (&less, a, b) ; /* less = (a < b) */ \
if (!less) \
{ \
/* check for equality and tie-break on index */ \
bool more ; \
flt (&more, b, a) ; /* more = (b < a) */ \
less = (more) ? false : ((i) < (j)) ; \
} \
}
#undef GB_SORT_UDT
#define GB_SORT_UDT 1
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _UDT)
#include "GB_sort_template.c"
//------------------------------------------------------------------------------
// GB_sort
//------------------------------------------------------------------------------
// GB_FREE_WORKSPACE: release temporaries only (the ek-slice workspace and the
// temporary matrix T) — used on the success path.
#undef GB_FREE_WORKSPACE
#define GB_FREE_WORKSPACE \
{ \
GB_WERK_POP (C_ek_slicing, int64_t) ; \
GB_phbix_free (T) ; \
}
// GB_FREE_ALL: on error, additionally free the partially-built outputs C and P.
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_FREE_WORKSPACE ; \
GB_phbix_free (C) ; \
GB_phbix_free (P) ; \
}
// GB_sort: sort the vectors of A with the comparator op, producing the sorted
// matrix C and/or the permutation matrix P (indices of the sorted entries
// within each vector).  At least one of C or P must be non-NULL.
GrB_Info GB_sort
(
// output:
GrB_Matrix C, // matrix with sorted vectors on output
GrB_Matrix P, // matrix with permutations on output
// input:
GrB_BinaryOp op, // comparator for the sort
GrB_Matrix A, // matrix to sort
const bool A_transpose, // false: sort each row, true: sort each column
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
ASSERT_MATRIX_OK (A, "A for GB_sort", GB0) ;
ASSERT_BINARYOP_OK (op, "op for GB_sort", GB0) ;
// T is a temporary matrix with a static header, used only when C is NULL
GrB_Matrix T = NULL ;
struct GB_Matrix_opaque T_header ;
GB_WERK_DECLARE (C_ek_slicing, int64_t) ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
bool C_is_NULL = (C == NULL) ;
if (C_is_NULL && P == NULL)
{
// either C, or P, or both must be present
return (GrB_NULL_POINTER) ;
}
GrB_Type atype = A->type ;
GrB_Type ctype = (C_is_NULL) ? atype : C->type ;
GrB_Type ptype = (P == NULL) ? GrB_INT64 : P->type ;
if (op->ztype != GrB_BOOL || op->xtype != op->ytype || atype != ctype
|| !(ptype == GrB_INT64 || ptype == GrB_UINT64)
|| !GB_Type_compatible (atype, op->xtype))
{
// op must return bool, and its inputs x and y must have the same type;
// the types of A and C must match exactly; P must be INT64 or UINT64;
// A and C must be typecasted to the input type of the op.
return (GrB_DOMAIN_MISMATCH) ;
}
int64_t anrows = GB_NROWS (A) ;
int64_t ancols = GB_NCOLS (A) ;
if ((C != NULL && (GB_NROWS (C) != anrows || GB_NCOLS (C) != ancols)) ||
(P != NULL && (GB_NROWS (P) != anrows || GB_NCOLS (P) != ancols)))
{
// C and P must have the same dimensions as A
return (GrB_DIMENSION_MISMATCH) ;
}
bool A_iso = A->iso ;
bool sort_in_place = (A == C) ;
// free any prior content of C and P
GB_phbix_free (P) ;
if (!sort_in_place)
{
GB_phbix_free (C) ;
}
//--------------------------------------------------------------------------
// make a copy of A, unless it is aliased with C
//--------------------------------------------------------------------------
if (C_is_NULL)
{
// C is a temporary matrix, which is freed when done
T = GB_clear_static_header (&T_header) ;
C = T ;
}
if (A_transpose)
{
// ensure C is in sparse or hypersparse CSC format
if (A->is_csc)
{
// A is already CSC
if (!sort_in_place)
{
// C = A (deep copy)
GB_OK (GB_dup_worker (&C, A_iso, A, true, atype, Context)) ;
}
}
else
{
// A is CSR but C must be CSC
if (sort_in_place)
{
// A = A'
GB_OK (GB_transpose_in_place (A, true, Context)) ;
}
else
{
// C = A'
GB_OK (GB_transpose_cast (C, atype, true, A, false, Context)) ;
}
}
}
else
{
// ensure C is in sparse or hypersparse CSR format
if (!A->is_csc)
{
// A is already CSR
if (!sort_in_place)
{
// C = A (deep copy)
GB_OK (GB_dup_worker (&C, A_iso, A, true, atype, Context)) ;
}
}
else
{
// A is CSC but C must be CSR
if (sort_in_place)
{
// A = A'
GB_OK (GB_transpose_in_place (A, false, Context)) ;
}
else
{
// C = A'
GB_OK (GB_transpose_cast (C, atype, false, A, false, Context)) ;
}
}
}
// ensure C is sparse or hypersparse CSC
if (GB_IS_BITMAP (C) || GB_IS_FULL (C))
{
GB_OK (GB_convert_any_to_sparse (C, Context)) ;
}
//--------------------------------------------------------------------------
// sort C in place
//--------------------------------------------------------------------------
GB_Opcode opcode = op->opcode ;
GB_Type_code acode = atype->code ;
// fast path: a built-in < or > comparator on a built-in type, with the op
// input types matching A exactly (no typecast needed)
if ((op->xtype == atype) && (op->ytype == atype) &&
(opcode == GB_LT_binop_code || opcode == GB_GT_binop_code) &&
(acode < GB_UDT_code))
{
//----------------------------------------------------------------------
// no typecasting, using built-in < or > operators, builtin types
//----------------------------------------------------------------------
if (opcode == GB_LT_binop_code)
{
// ascending sort
switch (acode)
{
case GB_BOOL_code :
GB_OK (GB(sort_matrix_ascend_BOOL )(C, Context)) ; break ;
case GB_INT8_code :
GB_OK (GB(sort_matrix_ascend_INT8 )(C, Context)) ; break ;
case GB_INT16_code :
GB_OK (GB(sort_matrix_ascend_INT16 )(C, Context)) ; break ;
case GB_INT32_code :
GB_OK (GB(sort_matrix_ascend_INT32 )(C, Context)) ; break ;
case GB_INT64_code :
GB_OK (GB(sort_matrix_ascend_INT64 )(C, Context)) ; break ;
case GB_UINT8_code :
GB_OK (GB(sort_matrix_ascend_UINT8 )(C, Context)) ; break ;
case GB_UINT16_code :
GB_OK (GB(sort_matrix_ascend_UINT16 )(C, Context)) ; break ;
case GB_UINT32_code :
GB_OK (GB(sort_matrix_ascend_UINT32 )(C, Context)) ; break ;
case GB_UINT64_code :
GB_OK (GB(sort_matrix_ascend_UINT64 )(C, Context)) ; break ;
case GB_FP32_code :
GB_OK (GB(sort_matrix_ascend_FP32 )(C, Context)) ; break ;
case GB_FP64_code :
GB_OK (GB(sort_matrix_ascend_FP64 )(C, Context)) ; break ;
default:;
}
}
else // opcode == GB_GT_binop_code
{
// descending sort
switch (acode)
{
case GB_BOOL_code :
GB_OK (GB(sort_matrix_descend_BOOL )(C, Context)) ; break ;
case GB_INT8_code :
GB_OK (GB(sort_matrix_descend_INT8 )(C, Context)) ; break ;
case GB_INT16_code :
GB_OK (GB(sort_matrix_descend_INT16 )(C, Context)) ; break ;
case GB_INT32_code :
GB_OK (GB(sort_matrix_descend_INT32 )(C, Context)) ; break ;
case GB_INT64_code :
GB_OK (GB(sort_matrix_descend_INT64 )(C, Context)) ; break ;
case GB_UINT8_code :
GB_OK (GB(sort_matrix_descend_UINT8 )(C, Context)) ; break ;
case GB_UINT16_code :
GB_OK (GB(sort_matrix_descend_UINT16)(C, Context)) ; break ;
case GB_UINT32_code :
GB_OK (GB(sort_matrix_descend_UINT32)(C, Context)) ; break ;
case GB_UINT64_code :
GB_OK (GB(sort_matrix_descend_UINT64)(C, Context)) ; break ;
case GB_FP32_code :
GB_OK (GB(sort_matrix_descend_FP32 )(C, Context)) ; break ;
case GB_FP64_code :
GB_OK (GB(sort_matrix_descend_FP64 )(C, Context)) ; break ;
default:;
}
}
}
else
{
//----------------------------------------------------------------------
// typecasting, user-defined types, or unconventional operators
//----------------------------------------------------------------------
GB_OK (GB (sort_matrix_UDT) (C, op, Context)) ;
}
//--------------------------------------------------------------------------
// construct the final indices
//--------------------------------------------------------------------------
// After the sort, C->i holds the original indices of each entry.  Each
// vector of the output C must instead hold 0..(length-1); those original
// indices become the values of P (if requested).
int64_t cnz = GB_nnz (C) ;
int64_t cnvec = C->nvec ;
int64_t *restrict Ti = NULL ;
if (P == NULL)
{
// P is not constructed; use C->i to construct the new indices
Ti = C->i ;
}
else
{
// allocate P->i and use it to construct the new indices
P->i = GB_MALLOC (cnz, int64_t, &(P->i_size)) ;
if (P->i == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
Ti = P->i ;
}
int C_nthreads, C_ntasks ;
GB_SLICE_MATRIX (C, 1, chunk) ;
int64_t *restrict Cp = C->p ;
const int64_t cvlen = C->vlen ;
int tid ;
// fill Ti with 0..(length-1) within each vector of C, in parallel
#pragma omp parallel for num_threads(C_nthreads) schedule(static,1)
for (tid = 0 ; tid < C_ntasks ; tid++)
{
int64_t kfirst = kfirst_Cslice [tid] ;
int64_t klast = klast_Cslice [tid] ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
const int64_t pC0 = Cp [k] ;
int64_t pC_start, pC_end ;
GB_get_pA (&pC_start, &pC_end, tid, k,
kfirst, klast, pstart_Cslice, Cp, cvlen) ;
for (int64_t pC = pC_start ; pC < pC_end ; pC++)
{
Ti [pC] = pC - pC0 ;
}
}
}
//--------------------------------------------------------------------------
// construct P
//--------------------------------------------------------------------------
bool C_is_hyper = GB_IS_HYPERSPARSE (C) ;
if (P != NULL)
{
P->is_csc = C->is_csc ;
P->nvec = C->nvec ;
P->nvec_nonempty = C->nvec_nonempty ;
P->iso = false ;
P->vlen = C->vlen ;
P->vdim = C->vdim ;
if (C_is_NULL)
{
// the values of C are not needed. The indices of C become the
// values of P, Cp becomes Pp, and Ch (if present) becomes Ph.
// Ownership of these arrays is transferred to P, so C must not
// free them (hence the NULL assignments).
P->x = C->i ; C->i = NULL ; P->x_size = C->i_size ;
P->p = C->p ; C->p = NULL ; P->p_size = C->p_size ;
P->h = C->h ; C->h = NULL ; P->h_size = C->h_size ;
P->plen = C->plen ;
}
else
{
// C is required on output. The indices of C are copied and
// become the values of P. Cp is copied to Pp, and Ch (if present)
// is copied to Ph.
P->plen = cnvec ;
P->x = GB_MALLOC (cnz, int64_t, &(P->x_size)) ;
P->p = GB_MALLOC (cnvec+1, int64_t, &(P->p_size)) ;
P->h = NULL ;
if (C_is_hyper)
{
P->h = GB_MALLOC (cnvec, int64_t, &(P->h_size)) ;
}
if (P->x == NULL || P->p == NULL || (C_is_hyper && P->h == NULL))
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
// copy from C to P
GB_memcpy (P->x, C->i, cnz * sizeof (int64_t), nthreads_max) ;
GB_memcpy (P->p, C->p, (cnvec+1) * sizeof (int64_t), nthreads_max) ;
if (C_is_hyper)
{
GB_memcpy (P->h, C->h, cnvec * sizeof (int64_t), nthreads_max) ;
}
}
P->magic = GB_MAGIC ;
}
//--------------------------------------------------------------------------
// finalize the pattern of C
//--------------------------------------------------------------------------
if (!C_is_NULL && P != NULL)
{
// copy P->i into C->i
GB_memcpy (C->i, P->i, cnz * sizeof (int64_t), nthreads_max) ;
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORKSPACE ;
if (!C_is_NULL) { ASSERT_MATRIX_OK (C, "C output of GB_sort", GB0) ; }
if (P != NULL) { ASSERT_MATRIX_OK (P, "P output of GB_sort", GB0) ; }
return (GrB_SUCCESS) ;
}
|
aac.c | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<math.h>
#include<unistd.h>
#include<omp.h>
// Ant-colony shortest-path demo; identifiers are Turkish (translations below).
#define NODE_SAYISI 5           // number of graph nodes
#define HEDEF_DUGUM 4           // target node the ants try to reach
#define BASLANGIC_DURUMU 0      // start node for every ant
#define FEROMON_ESIGI 1         // pheromone threshold for following a trail
#define FEROMON_SALGI_MIKTARI 0.2  // pheromone deposited per traversal
#define KARINCA_SAYISI 100      // number of ants
#define MAX_YOL_UZUNLUGU 30     // maximum recorded path length per ant
#define TRUE 1
#define FALSE 0
#define NO_WAY 999              // path-slot sentinel: unused entry
#define NO_CHOOSEN_WAY 99       // sentinel: no edge exceeded the threshold
_Bool komsuluk_matrisi[NODE_SAYISI][NODE_SAYISI];   // adjacency matrix (directed)
int agirlik_matrisi[NODE_SAYISI][NODE_SAYISI];      // edge weights
float feromon_matrisi[NODE_SAYISI][NODE_SAYISI];    // pheromone level per edge
int yol[KARINCA_SAYISI][MAX_YOL_UZUNLUGU];          // recorded path of each ant
// Build the demo graph: clear all pheromone, then register the directed
// edges and their weights.
void create_graph(){
    for(int row = 0; row < NODE_SAYISI; row++){
        for(int col = 0; col < NODE_SAYISI; col++){
            feromon_matrisi[row][col] = 0;
        }
    }
    // directed edges with their weights
    komsuluk_matrisi[0][1] = TRUE;  agirlik_matrisi[0][1] = 5;
    komsuluk_matrisi[0][2] = TRUE;  agirlik_matrisi[0][2] = 6;
    komsuluk_matrisi[1][2] = TRUE;  agirlik_matrisi[1][2] = 2;
    komsuluk_matrisi[1][3] = TRUE;  agirlik_matrisi[1][3] = 5;
    komsuluk_matrisi[2][1] = TRUE;  agirlik_matrisi[2][1] = 2;
    komsuluk_matrisi[2][3] = TRUE;  agirlik_matrisi[2][3] = 1;
    komsuluk_matrisi[3][4] = TRUE;  agirlik_matrisi[3][4] = 10;
}
// Dump the pheromone level of every directed edge, one line per pair.
void print_feromon_matrisi(){
    for (int from = 0; from < NODE_SAYISI; from++){
        for (int to = 0; to < NODE_SAYISI; to++){
            printf("%d'den %d'ye: %f\n", from, to, feromon_matrisi[from][to]);
        }
    }
}
// Print one ant's recorded path and its total edge-weight length.
// Fix: the old loop ran i up to MAX_YOL_UZUNLUGU-1 while reading
// yol[karinca_no][i+1], an out-of-bounds read on the final iteration when a
// path fills the whole array.  The loop now stops one slot earlier and the
// terminal node is printed after the loop, which also covers a full-length
// path correctly.
void print_ant_way(int karinca_no){
    printf("karinca no: %d, yol: ", karinca_no);
    int temp_uzunluk = 0;   // accumulated path weight
    int i;
    for(i = 0; i < MAX_YOL_UZUNLUGU - 1; i++){
        if(yol[karinca_no][i+1] == NO_WAY){
            break;  // yol[karinca_no][i] is the last node of the path
        }
        printf(" %d ->", yol[karinca_no][i]);
        temp_uzunluk += agirlik_matrisi[yol[karinca_no][i]][yol[karinca_no][i+1]];
    }
    printf("%d, toplam uzunluk: %d\n", yol[karinca_no][i], temp_uzunluk);
}
// Collect every neighbour reachable from `durum` into
// `gidilebilecek_olasi_yollar` (in ascending node order) and return how many
// were found.
int find_possible_ways_and_get_counts(int durum, int *gidilebilecek_olasi_yollar){
    int adet = 0;
    for(int aday = 0; aday < NODE_SAYISI; aday++){
        if(!komsuluk_matrisi[durum][aday])
            continue;
        gidilebilecek_olasi_yollar[adet++] = aday;
    }
    return adet;
}
// Return the LAST candidate whose pheromone level exceeds FEROMON_ESIGI, or
// NO_CHOOSEN_WAY when none does.  (Scanning to the end — not stopping at the
// first hit — matches the original selection behaviour.)
int smell_ways_and_get_feromon(int durum, int *gidilebilecek_olasi_yollar, int temp_sayac){
    int tercih = NO_CHOOSEN_WAY;
    int idx = 0;
    while(idx < temp_sayac){
        int aday = gidilebilecek_olasi_yollar[idx];
        if(feromon_matrisi[durum][aday] > FEROMON_ESIGI)
            tercih = aday;
        idx++;
    }
    return tercih;
}
// Pick a uniformly random candidate from `gidilebilecek_olasi_yollar`
// (temp_sayac entries).  Returns -1 when the candidate list is empty.
//
// Fixes:
//  - removed `sleep(0.2)`: sleep() takes an unsigned int, so 0.2 truncated
//    to sleep(0) — a no-op — and sleeping cannot "randomize" rand() anyway;
//  - removed the redundant abs(): rand() never returns a negative value;
//  - guard against temp_sayac <= 0, which previously caused a division by
//    zero in the modulo.
int get_random_way(int *gidilebilecek_olasi_yollar, int temp_sayac){
    if (temp_sayac <= 0)
        return -1;  // dead end: no outgoing edge to choose from
    return gidilebilecek_olasi_yollar[rand() % temp_sayac];
}
// Simulate KARINCA_SAYISI ants walking from the start node toward the target,
// depositing pheromone on each traversed edge, then print every ant's path.
int main(int argc, char const *argv[]) {
    create_graph();
    int durum;          // current node of the ant being simulated
    srand(time(NULL));
    int temp_sayac;     // number of reachable neighbours from `durum`
    int gidilebilecek_olasi_yollar[NODE_SAYISI];  // candidate next nodes
    int iterasyon_no;   // steps taken so far on this ant's path
    int secilen_yol;    // node chosen for the next step
    // Each ant is independent, so the outer loop is parallelized; per-ant
    // scratch variables are declared private.
    // NOTE(review): feromon_matrisi is read in smell_ways_and_get_feromon()
    // without synchronization while other threads update it inside the
    // critical section below — a data race on the float matrix; confirm this
    // "approximate pheromone" behaviour is intended for the heuristic.
    #pragma omp parallel for num_threads(4) private(durum, iterasyon_no, secilen_yol, temp_sayac, gidilebilecek_olasi_yollar)
    for(int karinca_no = 0; karinca_no < KARINCA_SAYISI; karinca_no++){
        durum = BASLANGIC_DURUMU;
        yol[karinca_no][0] = durum;
        iterasyon_no = 1; // arriving at the start state also counts as one iteration
        secilen_yol = NO_CHOOSEN_WAY;
        // mark the remainder of this ant's path as unused
        for(int i = 1; i<MAX_YOL_UZUNLUGU; i++)
            yol[karinca_no][i] = NO_WAY;
        while(durum != HEDEF_DUGUM && iterasyon_no < MAX_YOL_UZUNLUGU){
            temp_sayac = find_possible_ways_and_get_counts(durum, gidilebilecek_olasi_yollar);
            // prefer an edge whose pheromone exceeds the threshold ...
            secilen_yol = smell_ways_and_get_feromon(durum, gidilebilecek_olasi_yollar, temp_sayac);
            // ... otherwise pick a random reachable neighbour
            if(secilen_yol == NO_CHOOSEN_WAY)
                secilen_yol = get_random_way(gidilebilecek_olasi_yollar, temp_sayac);
            // deposit pheromone inversely proportional to the edge weight
            #pragma omp critical
            feromon_matrisi[durum][secilen_yol] += FEROMON_SALGI_MIKTARI/agirlik_matrisi[durum][secilen_yol];
            durum = secilen_yol;
            yol[karinca_no][iterasyon_no] = secilen_yol;
            iterasyon_no++;
        }
    }
    for(int i=0; i<KARINCA_SAYISI;i++)
        print_ant_way(i);
    return 0;
}
|
ams.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "float.h"
#include "ams.h"
#include "_hypre_utilities.hpp"
/*--------------------------------------------------------------------------
* hypre_ParCSRRelax
*
* Relaxation on the ParCSR matrix A with right-hand side f and
* initial guess u. Possible values for relax_type are:
*
* 1 = l1-scaled (or weighted) Jacobi
* 2 = l1-scaled block Gauss-Seidel/SSOR
* 3 = Kaczmarz
* 4 = truncated version of 2 (Remark 6.2 in smoothers paper)
* x = BoomerAMG relaxation with relax_type = |x|
* (16 = Cheby)
*
* The default value of relax_type is 2.
*--------------------------------------------------------------------------*/
// Apply `relax_times` sweeps of the smoother selected by `relax_type` to
// A u = f (see the banner comment above for the type codes).  Returns
// hypre_error_flag.
HYPRE_Int
hypre_ParCSRRelax( hypre_ParCSRMatrix *A, /* matrix to relax with */
hypre_ParVector *f, /* right-hand side */
HYPRE_Int relax_type, /* relaxation type */
HYPRE_Int relax_times, /* number of sweeps */
HYPRE_Real *l1_norms, /* l1 norms of the rows of A */
HYPRE_Real relax_weight, /* damping coefficient (usually <= 1) */
HYPRE_Real omega, /* SOR parameter (usually in (0,2) */
HYPRE_Real max_eig_est, /* for cheby smoothers */
HYPRE_Real min_eig_est,
HYPRE_Int cheby_order,
HYPRE_Real cheby_fraction,
hypre_ParVector *u, /* initial/updated approximation */
hypre_ParVector *v, /* temporary vector */
hypre_ParVector *z /* temporary vector */ )
{
HYPRE_Int sweep;
for (sweep = 0; sweep < relax_times; sweep++)
{
if (relax_type == 1) /* l1-scaled Jacobi */
{
// dispatched to BoomerAMG relax type 7 with omega fixed at 1.0
hypre_BoomerAMGRelax(A, f, NULL, 7, 0, relax_weight, 1.0, l1_norms, u, v, z);
}
else if (relax_type == 2 || relax_type == 4) /* offd-l1-scaled block GS */
{
/* !!! Note: relax_weight and omega flipped !!! */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
// pick the device kernel when A lives in device memory
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );
if (exec == HYPRE_EXEC_DEVICE)
{
hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, NULL, 0, omega, relax_weight, l1_norms, u, v, z,
1, 1 /* symm */);
}
else
#endif
{
hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, NULL, 0, omega, relax_weight, l1_norms, u, v, z,
1, 1 /* symm */, 0 /* skip diag */, 1, 0);
}
}
else if (relax_type == 3) /* Kaczmarz */
{
hypre_BoomerAMGRelax(A, f, NULL, 20, 0, relax_weight, omega, l1_norms, u, v, z);
}
else /* call BoomerAMG relaxation */
{
if (relax_type == 16)
{
hypre_ParCSRRelax_Cheby(A, f, max_eig_est, min_eig_est, cheby_fraction, cheby_order, 1,
0, u, v, z);
}
else
{
// any other code: forward |relax_type| to the generic BoomerAMG relax
hypre_BoomerAMGRelax(A, f, NULL, hypre_abs(relax_type), 0, relax_weight,
omega, l1_norms, u, v, z);
}
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorInRangeOf
*
* Return a vector that belongs to the range of a given matrix.
*--------------------------------------------------------------------------*/
// Allocate and initialize a vector in the range of A (length = global number
// of rows, partitioned by A's row starts).  The caller owns the result.
hypre_ParVector *hypre_ParVectorInRangeOf(hypre_ParCSRMatrix *A)
{
    hypre_ParVector *range_vec =
        hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                              hypre_ParCSRMatrixGlobalNumRows(A),
                              hypre_ParCSRMatrixRowStarts(A));
    hypre_ParVectorInitialize(range_vec);
    hypre_ParVectorOwnsData(range_vec) = 1;
    return range_vec;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorInDomainOf
*
* Return a vector that belongs to the domain of a given matrix.
*--------------------------------------------------------------------------*/
// Allocate and initialize a vector in the domain of A (length = global number
// of columns, partitioned by A's column starts).  The caller owns the result.
hypre_ParVector *hypre_ParVectorInDomainOf(hypre_ParCSRMatrix *A)
{
    hypre_ParVector *domain_vec =
        hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                              hypre_ParCSRMatrixGlobalNumCols(A),
                              hypre_ParCSRMatrixColStarts(A));
    hypre_ParVectorInitialize(domain_vec);
    hypre_ParVectorOwnsData(domain_vec) = 1;
    return domain_vec;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorBlockSplit
*
* Extract the dim sub-vectors x_0,...,x_{dim-1} composing a parallel
* block vector x. It is assumed that &x[i] = [x_0[i],...,x_{dim-1}[i]].
*--------------------------------------------------------------------------*/
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
// One thread per (component, entry) pair of the interleaved block vector x.
// dir == 0: split x into the component vectors x0..x2 (x -> xx[d][k]);
// dir == 1: gather the component vectors back into x (xx[d][k] -> x).
// x1/x2 may be unused when dim < 3; only xx[0..dim-1] are ever indexed.
template<HYPRE_Int dir>
__global__ void
hypreCUDAKernel_ParVectorBlockSplitGather(HYPRE_Int size,
HYPRE_Int dim,
HYPRE_Real *x0,
HYPRE_Real *x1,
HYPRE_Real *x2,
HYPRE_Real *x)
{
const HYPRE_Int i = hypre_cuda_get_grid_thread_id<1, 1>();
if (i >= size * dim)
{
return;
}
HYPRE_Real *xx[3];
xx[0] = x0;
xx[1] = x1;
xx[2] = x2;
// x is interleaved: entry i corresponds to component d of block-row k
const HYPRE_Int d = i % dim;
const HYPRE_Int k = i / dim;
if (dir == 0)
{
xx[d][k] = x[i];
}
else if (dir == 1)
{
x[i] = xx[d][k];
}
}
#endif
// Extract the dim (<= 3) sub-vectors x_[0..dim-1] from the interleaved block
// vector x, where &x[i] = [x_0[i],...,x_{dim-1}[i]].  Dispatches to the CUDA
// kernel when x resides on the device.  Returns hypre_error_flag.
HYPRE_Int hypre_ParVectorBlockSplit(hypre_ParVector *x,
hypre_ParVector *x_[3],
HYPRE_Int dim)
{
HYPRE_Int i, d, size_;
HYPRE_Real *x_data, *x_data_[3];
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParVectorMemoryLocation(x) );
#endif
// local length taken from the first sub-vector
size_ = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));
x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
for (d = 0; d < dim; d++)
{
x_data_[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));
}
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
if (exec == HYPRE_EXEC_DEVICE)
{
dim3 bDim = hypre_GetDefaultCUDABlockDimension();
dim3 gDim = hypre_GetDefaultCUDAGridDimension(size_ * dim, "thread", bDim);
// <0> selects the split direction of the shared split/gather kernel
HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ParVectorBlockSplitGather<0>, gDim, bDim,
size_, dim, x_data_[0], x_data_[1], x_data_[2], x_data);
}
else
#endif
{
for (i = 0; i < size_; i++)
for (d = 0; d < dim; d++)
{
x_data_[d][i] = x_data[dim * i + d];
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorBlockGather
*
* Compose a parallel block vector x from dim given sub-vectors
* x_0,...,x_{dim-1}, such that &x[i] = [x_0[i],...,x_{dim-1}[i]].
*--------------------------------------------------------------------------*/
// Inverse of hypre_ParVectorBlockSplit: interleave the dim (<= 3) sub-vectors
// x_[0..dim-1] into the block vector x, &x[i] = [x_0[i],...,x_{dim-1}[i]].
// Dispatches to the CUDA kernel when x resides on the device.
HYPRE_Int hypre_ParVectorBlockGather(hypre_ParVector *x,
hypre_ParVector *x_[3],
HYPRE_Int dim)
{
HYPRE_Int i, d, size_;
HYPRE_Real *x_data, *x_data_[3];
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParVectorMemoryLocation(x) );
#endif
// local length taken from the first sub-vector
size_ = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));
x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
for (d = 0; d < dim; d++)
{
x_data_[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));
}
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
if (exec == HYPRE_EXEC_DEVICE)
{
dim3 bDim = hypre_GetDefaultCUDABlockDimension();
dim3 gDim = hypre_GetDefaultCUDAGridDimension(size_ * dim, "thread", bDim);
// <1> selects the gather direction of the shared split/gather kernel
HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ParVectorBlockSplitGather<1>, gDim, bDim,
size_, dim, x_data_[0], x_data_[1], x_data_[2], x_data);
}
else
#endif
{
for (i = 0; i < size_; i++)
for (d = 0; d < dim; d++)
{
x_data[dim * i + d] = x_data_[d][i];
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_BoomerAMGBlockSolve
*
* Apply the block-diagonal solver diag(B) to the system diag(A) x = b.
* Here B is a given BoomerAMG solver for A, while x and b are "block"
* parallel vectors.
*--------------------------------------------------------------------------*/
// Apply the block-diagonal solver diag(B) to diag(A) x = b (see banner).
// dim is inferred from the ratio of vector size to matrix rows; dim == 1
// falls through to a single BoomerAMG solve.
HYPRE_Int hypre_BoomerAMGBlockSolve(void *B,
hypre_ParCSRMatrix *A,
hypre_ParVector *b,
hypre_ParVector *x)
{
HYPRE_Int d, dim = 1;
hypre_ParVector *b_[3];
hypre_ParVector *x_[3];
dim = hypre_ParVectorGlobalSize(x) / hypre_ParCSRMatrixGlobalNumRows(A);
if (dim == 1)
{
hypre_BoomerAMGSolve(B, A, b, x);
return hypre_error_flag;
}
// temporaries for the de-interleaved components; both created in the range
// of A (A is used as a square block here, so range == domain size)
for (d = 0; d < dim; d++)
{
b_[d] = hypre_ParVectorInRangeOf(A);
x_[d] = hypre_ParVectorInRangeOf(A);
}
hypre_ParVectorBlockSplit(b, b_, dim);
hypre_ParVectorBlockSplit(x, x_, dim);
// solve each component system independently with the same AMG setup B
for (d = 0; d < dim; d++)
{
hypre_BoomerAMGSolve(B, A, b_[d], x_[d]);
}
hypre_ParVectorBlockGather(x, x_, dim);
for (d = 0; d < dim; d++)
{
hypre_ParVectorDestroy(b_[d]);
hypre_ParVectorDestroy(x_[d]);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixFixZeroRows
*
* For every zero row in the matrix: set the diagonal element to 1.
*--------------------------------------------------------------------------*/
// Host version: for every (near-)zero row of A, zero the whole row and set
// its diagonal entry to 1, making A nonsingular on those rows.
HYPRE_Int hypre_ParCSRMatrixFixZeroRowsHost(hypre_ParCSRMatrix *A)
{
HYPRE_Int i, j;
HYPRE_Real l1_norm;
HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
/* a row will be considered zero if its l1 norm is less than eps */
HYPRE_Real eps = 0.0; /* DBL_EPSILON * 1e+4; */
for (i = 0; i < num_rows; i++)
{
// l1 norm of row i over both the diag and offd parts
l1_norm = 0.0;
for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)
{
l1_norm += fabs(A_diag_data[j]);
}
if (num_cols_offd)
for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)
{
l1_norm += fabs(A_offd_data[j]);
}
if (l1_norm <= eps)
{
// zero row: set the stored diagonal entry to 1, all others to 0
for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)
if (A_diag_J[j] == i)
{
A_diag_data[j] = 1.0;
}
else
{
A_diag_data[j] = 0.0;
}
if (num_cols_offd)
for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)
{
A_offd_data[j] = 0.0;
}
}
}
return hypre_error_flag;
}
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* Device kernel backing hypre_ParCSRMatrixFixZeroRowsDevice.
 * One warp per row: the warp computes the l1 norm of its row (diag plus
 * offd parts) and, when that norm is <= eps, rewrites the row as an
 * identity row (diagonal entry 1.0, every other entry 0.0). */
__global__ void
hypreCUDAKernel_ParCSRMatrixFixZeroRows( HYPRE_Int nrows,
                                         HYPRE_Int *A_diag_i,
                                         HYPRE_Int *A_diag_j,
                                         HYPRE_Complex *A_diag_data,
                                         HYPRE_Int *A_offd_i,
                                         HYPRE_Complex *A_offd_data,
                                         HYPRE_Int num_cols_offd)
{
   HYPRE_Int row_i = hypre_cuda_get_grid_warp_id<1, 1>();
   if (row_i >= nrows)
   {
      return;
   }
   HYPRE_Int lane = hypre_cuda_get_lane_id<1>();
   /* a row is considered zero if its l1 norm is less than eps */
   HYPRE_Real eps = 0.0; /* DBL_EPSILON * 1e+4; */
   HYPRE_Real l1_norm = 0.0;
   HYPRE_Int p1, q1, p2 = 0, q2 = 0;
   /* Lanes 0 and 1 load the row's begin/end pointers; the values are then
      broadcast to the whole warp via __shfl_sync (p1/q1 are intentionally
      left unset on the other lanes until the shuffles below). */
   if (lane < 2)
   {
      p1 = read_only_load(A_diag_i + row_i + lane);
      if (num_cols_offd)
      {
         p2 = read_only_load(A_offd_i + row_i + lane);
      }
   }
   q1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 1);
   p1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 0);
   if (num_cols_offd)
   {
      q2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 1);
      p2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 0);
   }
   /* Each lane accumulates a strided portion of the row's l1 norm */
   for (HYPRE_Int j = p1 + lane; j < q1; j += HYPRE_WARP_SIZE)
   {
      l1_norm += fabs(A_diag_data[j]);
   }
   for (HYPRE_Int j = p2 + lane; j < q2; j += HYPRE_WARP_SIZE)
   {
      l1_norm += fabs(A_offd_data[j]);
   }
   /* Sum the partial norms so every lane holds the full row norm */
   l1_norm = warp_allreduce_sum(l1_norm);
   if (l1_norm <= eps)
   {
      /* Zero row: set diagonal entry to 1, all other entries to 0 */
      for (HYPRE_Int j = p1 + lane; j < q1; j += HYPRE_WARP_SIZE)
      {
         if (row_i == read_only_load(&A_diag_j[j]))
         {
            A_diag_data[j] = 1.0;
         }
         else
         {
            A_diag_data[j] = 0.0;
         }
      }
      for (HYPRE_Int j = p2 + lane; j < q2; j += HYPRE_WARP_SIZE)
      {
         A_offd_data[j] = 0.0;
      }
   }
}
/* Device implementation of hypre_ParCSRMatrixFixZeroRows: launches the
 * warp-per-row kernel above over all local rows of A. */
HYPRE_Int hypre_ParCSRMatrixFixZeroRowsDevice(hypre_ParCSRMatrix *A)
{
   HYPRE_Int nrows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   dim3 bDim, gDim;
   bDim = hypre_GetDefaultCUDABlockDimension();
   /* "warp" granularity: one warp per matrix row */
   gDim = hypre_GetDefaultCUDAGridDimension(nrows, "warp", bDim);
   HYPRE_CUDA_LAUNCH(hypreCUDAKernel_ParCSRMatrixFixZeroRows, gDim, bDim,
                     nrows, A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_data, num_cols_offd);
   //hypre_SyncCudaComputeStream(hypre_handle());
   return hypre_error_flag;
}
#endif
/* For every zero row in A, set the diagonal element to 1 (see the header
 * comment above). Dispatches to the host or device implementation based
 * on where A's data lives. */
HYPRE_Int hypre_ParCSRMatrixFixZeroRows(hypre_ParCSRMatrix *A)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );
   if (exec == HYPRE_EXEC_DEVICE)
   {
      return hypre_ParCSRMatrixFixZeroRowsDevice(A);
   }
   else
#endif
   {
      return hypre_ParCSRMatrixFixZeroRowsHost(A);
   }
}
/*--------------------------------------------------------------------------
* hypre_ParCSRComputeL1Norms
*
* Compute the l1 norms of the rows of a given matrix, depending on
* the option parameter:
*
* option 1 = Compute the l1 norm of the rows
* option 2 = Compute the l1 norm of the (processor) off-diagonal
* part of the rows plus the diagonal of A
* option 3 = Compute the l2 norm^2 of the rows
* option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
* Smoothers for Ultra-Parallel Computing"
*
* The above computations are done in a CF manner, whenever the provided
* cf_marker is not NULL.
*--------------------------------------------------------------------------*/
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* Thrust functor implementing the option-4 truncation: keep the accumulated
 * row sum x unless it is within a factor of 4/3 of the diagonal y, in which
 * case fall back to the diagonal itself. */
struct l1_norm_op1 : public thrust::binary_function<HYPRE_Complex, HYPRE_Complex, HYPRE_Complex>
{
   __host__ __device__
   HYPRE_Complex operator()(HYPRE_Complex &x, HYPRE_Complex &y) const
   {
      if (x <= 4.0 / 3.0 * y)
      {
         return y;
      }
      return x;
   }
};
#endif
/* Compute the l1 norms of the rows of A according to `option`:
 *   1 - l1 norm of each row,
 *   2 - abs(diagonal) plus l1 norm of the (processor) off-diagonal part,
 *   3 - l2 norm^2 of each row,
 *   4 - truncated version of option 2 (Remark 6.2, "Multigrid Smoothers
 *       for Ultra-Parallel Computing"),
 *   5 - the diagonal of A (zeros replaced by 1), for matvec-based Jacobi.
 * When cf_marker != NULL, the sums for options 1, 2 and 4 are restricted
 * to columns whose CF marker matches the row's (the offd markers are
 * fetched from the neighboring processors first).
 * The result array is allocated in A's memory location and returned
 * through l1_norm_ptr; for options 1-4 the signs are flipped on rows with
 * negative diagonal and an error is raised if any norm is exactly zero. */
HYPRE_Int hypre_ParCSRComputeL1Norms(hypre_ParCSRMatrix *A,
                                     HYPRE_Int option,
                                     HYPRE_Int *cf_marker,
                                     HYPRE_Real **l1_norm_ptr)
{
   HYPRE_Int i;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_MemoryLocation memory_location_l1 = hypre_ParCSRMatrixMemoryLocation(A);
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( memory_location_l1 );
   /* Multi-threaded host runs are handled by the threaded variant */
   if (exec == HYPRE_EXEC_HOST)
   {
      HYPRE_Int num_threads = hypre_NumThreads();
      if (num_threads > 1)
      {
         return hypre_ParCSRComputeL1NormsThreads(A, option, num_threads, cf_marker, l1_norm_ptr);
      }
   }
   HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_l1);
   /* Scratch arrays live on host or device to match the execution policy */
   HYPRE_MemoryLocation memory_location_tmp = exec == HYPRE_EXEC_HOST ? HYPRE_MEMORY_HOST :
                                              HYPRE_MEMORY_DEVICE;
   HYPRE_Real *diag_tmp = NULL;
   HYPRE_Int *cf_marker_offd = NULL;
   /* collect the cf marker data from other procs */
   if (cf_marker != NULL)
   {
      HYPRE_Int num_sends;
      HYPRE_Int *int_buf_data = NULL;
      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;
      if (num_cols_offd)
      {
         cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, memory_location_tmp);
      }
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
      {
         int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                      memory_location_tmp);
      }
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      if (exec == HYPRE_EXEC_DEVICE)
      {
         /* Pack the send buffer on the device with a thrust gather */
         hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg);
         HYPRE_THRUST_CALL( gather,
                            hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg),
                            hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg) + hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                  num_sends),
                            cf_marker,
                            int_buf_data );
      }
      else
#endif
      {
         HYPRE_Int index = 0;
         HYPRE_Int start;
         HYPRE_Int j;
         for (i = 0; i < num_sends; i++)
         {
            start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            {
               int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
            }
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate_v2(11, comm_pkg, memory_location_tmp, int_buf_data,
                                                    memory_location_tmp, cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, memory_location_tmp);
   }
   if (option == 1)
   {
      /* Set the l1 norm of the diag part */
      hypre_CSRMatrixComputeRowSum(A_diag, cf_marker, cf_marker, l1_norm, 1, 1.0, "set");
      /* Add the l1 norm of the offd part */
      if (num_cols_offd)
      {
         hypre_CSRMatrixComputeRowSum(A_offd, cf_marker, cf_marker_offd, l1_norm, 1, 1.0, "add");
      }
   }
   else if (option == 2)
   {
      /* Set the abs(diag) element */
      hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 1);
      /* Add the l1 norm of the offd part. Offd columns must be matched
         against the received cf_marker_offd (not the local cf_marker),
         consistent with options 1 and 4. */
      if (num_cols_offd)
      {
         hypre_CSRMatrixComputeRowSum(A_offd, cf_marker, cf_marker_offd, l1_norm, 1, 1.0, "add");
      }
   }
   else if (option == 3)
   {
      /* Set the l2 norm^2 of the diag part (no CF restriction) */
      hypre_CSRMatrixComputeRowSum(A_diag, NULL, NULL, l1_norm, 2, 1.0, "set");
      /* Add the l2 norm^2 of the offd part */
      if (num_cols_offd)
      {
         hypre_CSRMatrixComputeRowSum(A_offd, NULL, NULL, l1_norm, 2, 1.0, "add");
      }
   }
   else if (option == 4)
   {
      /* Set the abs(diag) element */
      hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 1);
      /* Keep a copy of the diagonal for the truncation step below */
      diag_tmp = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_tmp);
      hypre_TMemcpy(diag_tmp, l1_norm, HYPRE_Real, num_rows, memory_location_tmp, memory_location_l1);
      /* Add the scaled l1 norm of the offd part */
      if (num_cols_offd)
      {
         hypre_CSRMatrixComputeRowSum(A_offd, cf_marker, cf_marker_offd, l1_norm, 1, 0.5, "add");
      }
      /* Truncate according to Remark 6.2 */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      if (exec == HYPRE_EXEC_DEVICE)
      {
         HYPRE_THRUST_CALL( transform, l1_norm, l1_norm + num_rows, diag_tmp, l1_norm, l1_norm_op1() );
      }
      else
#endif
      {
         for (i = 0; i < num_rows; i++)
         {
            if (l1_norm[i] <= 4.0 / 3.0 * diag_tmp[i])
            {
               l1_norm[i] = diag_tmp[i];
            }
         }
      }
   }
   else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */
   {
      /* Set the diag element */
      hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 0);
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      if ( exec == HYPRE_EXEC_DEVICE)
      {
         /* Replace zero diagonal entries by 1.0 */
         thrust::identity<HYPRE_Complex> identity;
         HYPRE_THRUST_CALL( replace_if, l1_norm, l1_norm + num_rows, thrust::not1(identity), 1.0 );
      }
      else
#endif
      {
         for (i = 0; i < num_rows; i++)
         {
            if (l1_norm[i] == 0.0)
            {
               l1_norm[i] = 1.0;
            }
         }
      }
      /* Release the communication scratch before the early return
         (previously leaked on this path) */
      hypre_TFree(cf_marker_offd, memory_location_tmp);
      *l1_norm_ptr = l1_norm;
      return hypre_error_flag;
   }
   /* Handle negative definite matrices */
   if (!diag_tmp)
   {
      diag_tmp = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_tmp);
   }
   /* Set the diag element */
   hypre_CSRMatrixExtractDiagonal(A_diag, diag_tmp, 0);
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (exec == HYPRE_EXEC_DEVICE)
   {
      /* Flip the sign of l1_norm on rows with a negative diagonal */
      HYPRE_THRUST_CALL( transform_if, l1_norm, l1_norm + num_rows, diag_tmp, l1_norm,
                         thrust::negate<HYPRE_Real>(),
                         is_negative<HYPRE_Real>() );
      //bool any_zero = HYPRE_THRUST_CALL( any_of, l1_norm, l1_norm + num_rows, thrust::not1(thrust::identity<HYPRE_Complex>()) );
      bool any_zero = 0.0 == HYPRE_THRUST_CALL( reduce, l1_norm, l1_norm + num_rows, 1.0,
                                                thrust::minimum<HYPRE_Real>() );
      if ( any_zero )
      {
         hypre_error_in_arg(1);
      }
   }
   else
#endif
   {
      for (i = 0; i < num_rows; i++)
      {
         if (diag_tmp[i] < 0.0)
         {
            l1_norm[i] = -l1_norm[i];
         }
      }
      /* A zero l1 norm would produce a division by zero in the smoother */
      for (i = 0; i < num_rows; i++)
      {
         /* if (fabs(l1_norm[i]) < DBL_EPSILON) */
         if (fabs(l1_norm[i]) == 0.0)
         {
            hypre_error_in_arg(1);
            break;
         }
      }
   }
   hypre_TFree(cf_marker_offd, memory_location_tmp);
   hypre_TFree(diag_tmp, memory_location_tmp);
   *l1_norm_ptr = l1_norm;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetDiagRows
*
* For every row containing only a diagonal element: set it to d.
*--------------------------------------------------------------------------*/
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* Device kernel backing hypre_ParCSRMatrixSetDiagRows: one thread per row.
 * If row i's diag part holds exactly one entry, that entry is the diagonal,
 * and the offd part of the row is empty, the entry is overwritten with d. */
__global__ void
hypreCUDAKernel_ParCSRMatrixSetDiagRows(HYPRE_Int nrows,
                                        HYPRE_Int *A_diag_I,
                                        HYPRE_Int *A_diag_J,
                                        HYPRE_Complex *A_diag_data,
                                        HYPRE_Int *A_offd_I,
                                        HYPRE_Int num_cols_offd,
                                        HYPRE_Real d)
{
   const HYPRE_Int i = hypre_cuda_get_grid_thread_id<1, 1>();
   if (i >= nrows)
   {
      return;
   }
   HYPRE_Int j = read_only_load(&A_diag_I[i]);
   /* single diag entry + that entry is the diagonal + empty offd row */
   if ( (read_only_load(&A_diag_I[i + 1]) == j + 1) && (read_only_load(&A_diag_J[j]) == i) &&
        (!num_cols_offd || (read_only_load(&A_offd_I[i + 1]) == read_only_load(&A_offd_I[i]))) )
   {
      A_diag_data[j] = d;
   }
}
#endif
/* For every row of A that contains only a diagonal element, set that
 * element to d (see the header comment above). Runs on host or device
 * depending on A's memory location. */
HYPRE_Int hypre_ParCSRMatrixSetDiagRows(hypre_ParCSRMatrix *A, HYPRE_Real d)
{
   HYPRE_Int i, j;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );
   if (exec == HYPRE_EXEC_DEVICE)
   {
      /* "thread" granularity: one thread per matrix row */
      dim3 bDim = hypre_GetDefaultCUDABlockDimension();
      dim3 gDim = hypre_GetDefaultCUDAGridDimension(num_rows, "thread", bDim);
      HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ParCSRMatrixSetDiagRows, gDim, bDim,
                         num_rows, A_diag_I, A_diag_J, A_diag_data, A_offd_I, num_cols_offd, d);
   }
   else
#endif
   {
      for (i = 0; i < num_rows; i++)
      {
         j = A_diag_I[i];
         /* single diag entry + that entry is the diagonal + empty offd row */
         if ((A_diag_I[i + 1] == j + 1) && (A_diag_J[j] == i) &&
             (!num_cols_offd || (A_offd_I[i + 1] == A_offd_I[i])))
         {
            A_diag_data[j] = d;
         }
      }
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSCreate
*
* Allocate the AMS solver structure.
*--------------------------------------------------------------------------*/
void * hypre_AMSCreate()
{
hypre_AMSData *ams_data;
ams_data = hypre_CTAlloc(hypre_AMSData, 1, HYPRE_MEMORY_HOST);
/* Default parameters */
ams_data -> dim = 3; /* 3D problem */
ams_data -> maxit = 20; /* perform at most 20 iterations */
ams_data -> tol = 1e-6; /* convergence tolerance */
ams_data -> print_level = 1; /* print residual norm at each step */
ams_data -> cycle_type = 1; /* a 3-level multiplicative solver */
ams_data -> A_relax_type = 2; /* offd-l1-scaled GS */
ams_data -> A_relax_times = 1; /* one relaxation sweep */
ams_data -> A_relax_weight = 1.0; /* damping parameter */
ams_data -> A_omega = 1.0; /* SSOR coefficient */
ams_data -> A_cheby_order = 2; /* Cheby: order (1 -4 are vaild) */
ams_data -> A_cheby_fraction = .3; /* Cheby: fraction of spectrum to smooth */
ams_data -> B_G_coarsen_type = 10; /* HMIS coarsening */
ams_data -> B_G_agg_levels = 1; /* Levels of aggressive coarsening */
ams_data -> B_G_relax_type = 3; /* hybrid G-S/Jacobi */
ams_data -> B_G_theta = 0.25; /* strength threshold */
ams_data -> B_G_interp_type = 0; /* interpolation type */
ams_data -> B_G_Pmax = 0; /* max nonzero elements in interp. rows */
ams_data -> B_Pi_coarsen_type = 10; /* HMIS coarsening */
ams_data -> B_Pi_agg_levels = 1; /* Levels of aggressive coarsening */
ams_data -> B_Pi_relax_type = 3; /* hybrid G-S/Jacobi */
ams_data -> B_Pi_theta = 0.25; /* strength threshold */
ams_data -> B_Pi_interp_type = 0; /* interpolation type */
ams_data -> B_Pi_Pmax = 0; /* max nonzero elements in interp. rows */
ams_data -> beta_is_zero = 0; /* the problem has a mass term */
/* By default, do l1-GS smoothing on the coarsest grid */
ams_data -> B_G_coarse_relax_type = 8;
ams_data -> B_Pi_coarse_relax_type = 8;
/* The rest of the fields are initialized using the Set functions */
ams_data -> A = NULL;
ams_data -> G = NULL;
ams_data -> A_G = NULL;
ams_data -> B_G = 0;
ams_data -> Pi = NULL;
ams_data -> A_Pi = NULL;
ams_data -> B_Pi = 0;
ams_data -> x = NULL;
ams_data -> y = NULL;
ams_data -> z = NULL;
ams_data -> Gx = NULL;
ams_data -> Gy = NULL;
ams_data -> Gz = NULL;
ams_data -> r0 = NULL;
ams_data -> g0 = NULL;
ams_data -> r1 = NULL;
ams_data -> g1 = NULL;
ams_data -> r2 = NULL;
ams_data -> g2 = NULL;
ams_data -> zz = NULL;
ams_data -> Pix = NULL;
ams_data -> Piy = NULL;
ams_data -> Piz = NULL;
ams_data -> A_Pix = NULL;
ams_data -> A_Piy = NULL;
ams_data -> A_Piz = NULL;
ams_data -> B_Pix = 0;
ams_data -> B_Piy = 0;
ams_data -> B_Piz = 0;
ams_data -> interior_nodes = NULL;
ams_data -> G0 = NULL;
ams_data -> A_G0 = NULL;
ams_data -> B_G0 = 0;
ams_data -> projection_frequency = 5;
ams_data -> A_l1_norms = NULL;
ams_data -> A_max_eig_est = 0;
ams_data -> A_min_eig_est = 0;
ams_data -> owns_Pi = 1;
ams_data -> owns_A_G = 0;
ams_data -> owns_A_Pi = 0;
return (void *) ams_data;
}
/*--------------------------------------------------------------------------
* hypre_AMSDestroy
*
* Deallocate the AMS solver structure. Note that the input data (given
* through the Set functions) is not destroyed.
*--------------------------------------------------------------------------*/
/* Deallocate the AMS solver structure created by hypre_AMSCreate().
 * Caller-provided input (G, x, y, z, Gx, Gy, Gz, and A_G/A_Pi/Pi* unless
 * the corresponding owns_* flag is set) is not destroyed here. */
HYPRE_Int hypre_AMSDestroy(void *solver)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   if (!ams_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   /* Coarse-space operators/solvers are freed only when owned */
   if (ams_data -> owns_A_G)
      if (ams_data -> A_G)
      {
         hypre_ParCSRMatrixDestroy(ams_data -> A_G);
      }
   if (!ams_data -> beta_is_zero)
      if (ams_data -> B_G)
      {
         HYPRE_BoomerAMGDestroy(ams_data -> B_G);
      }
   if (ams_data -> owns_Pi && ams_data -> Pi)
   {
      hypre_ParCSRMatrixDestroy(ams_data -> Pi);
   }
   if (ams_data -> owns_A_Pi)
      if (ams_data -> A_Pi)
      {
         hypre_ParCSRMatrixDestroy(ams_data -> A_Pi);
      }
   if (ams_data -> B_Pi)
   {
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pi);
   }
   /* Component interpolations and their coarse operators/solvers */
   if (ams_data -> owns_Pi && ams_data -> Pix)
   {
      hypre_ParCSRMatrixDestroy(ams_data -> Pix);
   }
   if (ams_data -> A_Pix)
   {
      hypre_ParCSRMatrixDestroy(ams_data -> A_Pix);
   }
   if (ams_data -> B_Pix)
   {
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pix);
   }
   if (ams_data -> owns_Pi && ams_data -> Piy)
   {
      hypre_ParCSRMatrixDestroy(ams_data -> Piy);
   }
   if (ams_data -> A_Piy)
   {
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piy);
   }
   if (ams_data -> B_Piy)
   {
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piy);
   }
   if (ams_data -> owns_Pi && ams_data -> Piz)
   {
      hypre_ParCSRMatrixDestroy(ams_data -> Piz);
   }
   if (ams_data -> A_Piz)
   {
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piz);
   }
   if (ams_data -> B_Piz)
   {
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piz);
   }
   /* Temporary work vectors created during Setup/Solve */
   if (ams_data -> r0)
   {
      hypre_ParVectorDestroy(ams_data -> r0);
   }
   if (ams_data -> g0)
   {
      hypre_ParVectorDestroy(ams_data -> g0);
   }
   if (ams_data -> r1)
   {
      hypre_ParVectorDestroy(ams_data -> r1);
   }
   if (ams_data -> g1)
   {
      hypre_ParVectorDestroy(ams_data -> g1);
   }
   if (ams_data -> r2)
   {
      hypre_ParVectorDestroy(ams_data -> r2);
   }
   if (ams_data -> g2)
   {
      hypre_ParVectorDestroy(ams_data -> g2);
   }
   if (ams_data -> zz)
   {
      hypre_ParVectorDestroy(ams_data -> zz);
   }
   /* NOTE(review): A is destroyed only when G0 was set — presumably because
      the zero-conductivity projection replaces A with an internally owned
      matrix. Confirm against hypre_AMSSetup; destroying a caller-owned A
      here would be a double free. */
   if (ams_data -> G0)
   {
      hypre_ParCSRMatrixDestroy(ams_data -> A);
   }
   if (ams_data -> G0)
   {
      hypre_ParCSRMatrixDestroy(ams_data -> G0);
   }
   if (ams_data -> A_G0)
   {
      hypre_ParCSRMatrixDestroy(ams_data -> A_G0);
   }
   if (ams_data -> B_G0)
   {
      HYPRE_BoomerAMGDestroy(ams_data -> B_G0);
   }
   hypre_SeqVectorDestroy(ams_data -> A_l1_norms);
   /* G, x, y ,z, Gx, Gy and Gz are not destroyed */
   if (ams_data)
   {
      hypre_TFree(ams_data, HYPRE_MEMORY_HOST);
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetDimension
*
* Set problem dimension (2 or 3). By default we assume dim = 3.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetDimension(void *solver,
                                HYPRE_Int dim)
{
   /* Record the spatial dimension of the problem; only 1, 2 or 3 are
      accepted (an error is raised, but the value is still stored). */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   if (dim != 1 && dim != 2 && dim != 3)
   {
      hypre_error_in_arg(2);
   }
   data -> dim = dim;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetDiscreteGradient
*
* Set the discrete gradient matrix G.
* This function should be called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetDiscreteGradient(void *solver,
                                       hypre_ParCSRMatrix *G)
{
   /* Store (not copy) the caller's discrete gradient matrix. */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> G = G;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetCoordinateVectors
*
* Set the x, y and z coordinates of the vertices in the mesh.
*
* Either SetCoordinateVectors or SetEdgeConstantVectors should be
* called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetCoordinateVectors(void *solver,
                                        hypre_ParVector *x,
                                        hypre_ParVector *y,
                                        hypre_ParVector *z)
{
   /* Store (not copy) the vertex coordinate vectors. */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> x = x;
   data -> y = y;
   data -> z = z;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetEdgeConstantVectors
*
* Set the vectors Gx, Gy and Gz which give the representations of
* the constant vector fields (1,0,0), (0,1,0) and (0,0,1) in the
* edge element basis.
*
* Either SetCoordinateVectors or SetEdgeConstantVectors should be
* called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetEdgeConstantVectors(void *solver,
                                          hypre_ParVector *Gx,
                                          hypre_ParVector *Gy,
                                          hypre_ParVector *Gz)
{
   /* Store (not copy) the edge representations of the constant fields. */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> Gx = Gx;
   data -> Gy = Gy;
   data -> Gz = Gz;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetInterpolations
*
* Set the (components of) the Nedelec interpolation matrix Pi=[Pix,Piy,Piz].
*
* This function is generally intended to be used only for high-order Nedelec
* discretizations (in the lowest order case, Pi is constructed internally in
* AMS from the discreet gradient matrix and the coordinates of the vertices),
* though it can also be used in the lowest-order case or for other types of
* discretizations (e.g. ones based on the second family of Nedelec elements).
*
* By definition, Pi is the matrix representation of the linear operator that
* interpolates (high-order) vector nodal finite elements into the (high-order)
* Nedelec space. The component matrices are defined as Pix phi = Pi (phi,0,0)
* and similarly for Piy and Piz. Note that all these operators depend on the
* choice of the basis and degrees of freedom in the high-order spaces.
*
* The column numbering of Pi should be node-based, i.e. the x/y/z components of
* the first node (vertex or high-order dof) should be listed first, followed by
* the x/y/z components of the second node and so on (see the documentation of
* HYPRE_BoomerAMGSetDofFunc).
*
* If used, this function should be called before hypre_AMSSetup() and there is
* no need to provide the vertex coordinates. Furthermore, only one of the sets
* {Pi} and {Pix,Piy,Piz} needs to be specified (though it is OK to provide
* both). If Pix is NULL, then scalar Pi-based AMS cycles, i.e. those with
* cycle_type > 10, will be unavailable. Similarly, AMS cycles based on
* monolithic Pi (cycle_type < 10) require that Pi is not NULL.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetInterpolations(void *solver,
                                     hypre_ParCSRMatrix *Pi,
                                     hypre_ParCSRMatrix *Pix,
                                     hypre_ParCSRMatrix *Piy,
                                     hypre_ParCSRMatrix *Piz)
{
   /* Store the user-supplied Nedelec interpolation (and/or its components);
      ownership stays with the caller, so clear owns_Pi. */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> Pi  = Pi;
   data -> Pix = Pix;
   data -> Piy = Piy;
   data -> Piz = Piz;
   data -> owns_Pi = 0;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetAlphaPoissonMatrix
*
* Set the matrix corresponding to the Poisson problem with coefficient
* alpha (the curl-curl term coefficient in the Maxwell problem).
*
* If this function is called, the coarse space solver on the range
* of Pi^T is a block-diagonal version of A_Pi. If this function is not
* called, the coarse space solver on the range of Pi^T is constructed
* as Pi^T A Pi in hypre_AMSSetup().
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetAlphaPoissonMatrix(void *solver,
                                         hypre_ParCSRMatrix *A_Pi)
{
   /* Store the alpha-Poisson matrix and penalize its eliminated
      (diagonal-only) degrees of freedom so they stay fixed. */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> A_Pi = A_Pi;
   /* Penalize the eliminated degrees of freedom */
   hypre_ParCSRMatrixSetDiagRows(A_Pi, HYPRE_REAL_MAX);
   /* Make sure that the first entry in each row is the diagonal one. */
   /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_Pi)); */
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetBetaPoissonMatrix
*
* Set the matrix corresponding to the Poisson problem with coefficient
* beta (the mass term coefficient in the Maxwell problem).
*
* This function call is optional - if not given, the Poisson matrix will
* be computed in hypre_AMSSetup(). If the given matrix is NULL, we assume
* that beta is 0 and use two-level (instead of three-level) methods.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetBetaPoissonMatrix(void *solver,
                                        hypre_ParCSRMatrix *A_G)
{
   /* Store the beta-Poisson matrix; a NULL argument declares beta == 0,
      which switches AMS to a two-level method. */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> A_G = A_G;
   if (A_G)
   {
      /* Penalize the eliminated degrees of freedom */
      hypre_ParCSRMatrixSetDiagRows(A_G, HYPRE_REAL_MAX);
      /* Make sure that the first entry in each row is the diagonal one. */
      /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_G)); */
   }
   else
   {
      data -> beta_is_zero = 1;
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetInteriorNodes
*
* Set the list of nodes which are interior to the zero-conductivity region.
* A node is interior if interior_nodes[i] == 1.0.
*
* Should be called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetInteriorNodes(void *solver,
                                    hypre_ParVector *interior_nodes)
{
   /* Store the indicator vector of zero-conductivity interior nodes. */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> interior_nodes = interior_nodes;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetProjectionFrequency
*
* How often to project the r.h.s. onto the compatible sub-space Ker(G0^T),
* when iterating with the solver.
*
* The default value is every 5th iteration.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetProjectionFrequency(void *solver,
                                          HYPRE_Int projection_frequency)
{
   /* Store how often the r.h.s. is projected onto Ker(G0^T). */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> projection_frequency = projection_frequency;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetMaxIter
*
* Set the maximum number of iterations in the three-level method.
* The default value is 20. To use the AMS solver as a preconditioner,
* set maxit to 1, tol to 0.0 and print_level to 0.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetMaxIter(void *solver,
                              HYPRE_Int maxit)
{
   /* Store the iteration cap (use 1 for preconditioner mode). */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> maxit = maxit;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetTol
*
* Set the convergence tolerance (if the method is used as a solver).
* The default value is 1e-6.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetTol(void *solver,
                          HYPRE_Real tol)
{
   /* Store the convergence tolerance (solver mode only). */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> tol = tol;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetCycleType
*
* Choose which three-level solver to use. Possible values are:
*
* 1 = 3-level multipl. solver (01210) <-- small solution time
* 2 = 3-level additive solver (0+1+2)
* 3 = 3-level multipl. solver (02120)
* 4 = 3-level additive solver (010+2)
* 5 = 3-level multipl. solver (0102010) <-- small solution time
* 6 = 3-level additive solver (1+020)
* 7 = 3-level multipl. solver (0201020) <-- small number of iterations
* 8 = 3-level additive solver (0(1+2)0) <-- small solution time
* 9 = 3-level multipl. solver (01210) with discrete divergence
* 11 = 5-level multipl. solver (013454310) <-- small solution time, memory
* 12 = 5-level additive solver (0+1+3+4+5)
* 13 = 5-level multipl. solver (034515430) <-- small solution time, memory
* 14 = 5-level additive solver (01(3+4+5)10)
* 20 = 2-level multipl. solver (0[12]0)
*
* 0 = a Hiptmair-like smoother (010)
*
* The default value is 1.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetCycleType(void *solver,
                                HYPRE_Int cycle_type)
{
   /* Store the multilevel cycle selector (see table above). */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> cycle_type = cycle_type;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetPrintLevel
*
* Control how much information is printed during the solution iterations.
* The defaut values is 1 (print residual norm at each step).
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetPrintLevel(void *solver,
                                 HYPRE_Int print_level)
{
   /* Store the iteration-output verbosity. */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> print_level = print_level;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetSmoothingOptions
*
* Set relaxation parameters for A. Default values: 2, 1, 1.0, 1.0.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetSmoothingOptions(void *solver,
                                       HYPRE_Int A_relax_type,
                                       HYPRE_Int A_relax_times,
                                       HYPRE_Real A_relax_weight,
                                       HYPRE_Real A_omega)
{
   /* Store the relaxation parameters for the fine-level smoother on A. */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> A_relax_type   = A_relax_type;
   data -> A_relax_times  = A_relax_times;
   data -> A_relax_weight = A_relax_weight;
   data -> A_omega        = A_omega;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetChebySmoothingOptions
* AB: note: this could be added to the above,
* but I didn't want to change parameter list)
* Set parameters for chebyshev smoother for A. Default values: 2,.3.
*--------------------------------------------------------------------------*/
/* Set parameters for the Chebyshev smoother for A. Defaults: order 2,
 * fraction .3 (see hypre_AMSCreate).
 * NOTE(review): A_cheby_fraction is declared HYPRE_Int here although it
 * stores a spectrum fraction whose default is .3 — any fractional argument
 * is truncated to an integer at the call. Upstream declares this parameter
 * HYPRE_Real; confirm against the prototype before changing the signature. */
HYPRE_Int hypre_AMSSetChebySmoothingOptions(void *solver,
                                            HYPRE_Int A_cheby_order,
                                            HYPRE_Int A_cheby_fraction)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   ams_data -> A_cheby_order = A_cheby_order;
   ams_data -> A_cheby_fraction = A_cheby_fraction;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetAlphaAMGOptions
*
* Set AMG parameters for B_Pi. Default values: 10, 1, 3, 0.25, 0, 0.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetAlphaAMGOptions(void *solver,
                                      HYPRE_Int B_Pi_coarsen_type,
                                      HYPRE_Int B_Pi_agg_levels,
                                      HYPRE_Int B_Pi_relax_type,
                                      HYPRE_Real B_Pi_theta,
                                      HYPRE_Int B_Pi_interp_type,
                                      HYPRE_Int B_Pi_Pmax)
{
   /* Store the BoomerAMG parameters for the B_Pi coarse solver. */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> B_Pi_coarsen_type = B_Pi_coarsen_type;
   data -> B_Pi_agg_levels   = B_Pi_agg_levels;
   data -> B_Pi_relax_type   = B_Pi_relax_type;
   data -> B_Pi_theta        = B_Pi_theta;
   data -> B_Pi_interp_type  = B_Pi_interp_type;
   data -> B_Pi_Pmax         = B_Pi_Pmax;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetAlphaAMGCoarseRelaxType
*
* Set the AMG coarsest level relaxation for B_Pi. Default value: 8.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetAlphaAMGCoarseRelaxType(void *solver,
                                              HYPRE_Int B_Pi_coarse_relax_type)
{
   /* Store the coarsest-level relaxation type for B_Pi. */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> B_Pi_coarse_relax_type = B_Pi_coarse_relax_type;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetBetaAMGOptions
*
* Set AMG parameters for B_G. Default values: 10, 1, 3, 0.25, 0, 0.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetBetaAMGOptions(void *solver,
                                     HYPRE_Int B_G_coarsen_type,
                                     HYPRE_Int B_G_agg_levels,
                                     HYPRE_Int B_G_relax_type,
                                     HYPRE_Real B_G_theta,
                                     HYPRE_Int B_G_interp_type,
                                     HYPRE_Int B_G_Pmax)
{
   /* Store the BoomerAMG parameters for the B_G coarse solver. */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> B_G_coarsen_type = B_G_coarsen_type;
   data -> B_G_agg_levels   = B_G_agg_levels;
   data -> B_G_relax_type   = B_G_relax_type;
   data -> B_G_theta        = B_G_theta;
   data -> B_G_interp_type  = B_G_interp_type;
   data -> B_G_Pmax         = B_G_Pmax;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetBetaAMGCoarseRelaxType
*
* Set the AMG coarsest level relaxation for B_G. Default value: 8.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetBetaAMGCoarseRelaxType(void *solver,
                                             HYPRE_Int B_G_coarse_relax_type)
{
   /* Store the coarsest-level relaxation type for B_G. */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> B_G_coarse_relax_type = B_G_coarse_relax_type;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSComputePi
*
* Construct the Pi interpolation matrix, which maps the space of vector
* linear finite elements to the space of edge finite elements.
*
* The construction is based on the fact that Pi = [Pi_x, Pi_y, Pi_z],
* where each block has the same sparsity structure as G, and the entries
* can be computed from the vectors Gx, Gy, Gz.
*--------------------------------------------------------------------------*/
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* Device kernel used by hypre_AMSComputePi: one thread per nonzero of G.
 * Expands each column index j_in[i] into the dim interleaved column indices
 * dim*j_in[i]+0 .. dim*j_in[i]+(dim-1) of Pi. */
__global__ void
hypreCUDAKernel_AMSComputePi_copy1(HYPRE_Int nnz,
                                   HYPRE_Int dim,
                                   HYPRE_Int *j_in,
                                   HYPRE_Int *j_out)
{
   const HYPRE_Int i = hypre_cuda_get_grid_thread_id<1, 1>();
   if (i < nnz)
   {
      const HYPRE_Int j = dim * i;
      for (HYPRE_Int d = 0; d < dim; d++)
      {
         j_out[j + d] = dim * read_only_load(&j_in[i]) + d;
      }
   }
}
/* Device kernel used by hypre_AMSComputePi: one warp per row.
 * For each entry of row i, writes dim consecutive output values
 * 0.5*|data_in[j]| * G{x,y,z}[i] (or just G{x,y,z}[i] when data_in is NULL).
 * Assumes dim <= 3 (G[3]) and dim <= warp size for the broadcast below. */
__global__ void
hypreCUDAKernel_AMSComputePi_copy2(HYPRE_Int nrows,
                                   HYPRE_Int dim,
                                   HYPRE_Int *i_in,
                                   HYPRE_Real *data_in,
                                   HYPRE_Real *Gx_data,
                                   HYPRE_Real *Gy_data,
                                   HYPRE_Real *Gz_data,
                                   HYPRE_Real *data_out)
{
   const HYPRE_Int i = hypre_cuda_get_grid_warp_id<1, 1>();
   if (i >= nrows)
   {
      return;
   }
   const HYPRE_Int lane_id = hypre_cuda_get_lane_id<1>();
   HYPRE_Int j, istart, iend;
   HYPRE_Real t, G[3], *Gdata[3];
   Gdata[0] = Gx_data;
   Gdata[1] = Gy_data;
   Gdata[2] = Gz_data;
   /* lanes 0 and 1 load the row begin/end pointers; values are broadcast
      to the whole warp by the shuffles below (j is left unset elsewhere) */
   if (lane_id < 2)
   {
      j = read_only_load(i_in + i + lane_id);
   }
   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);
   /* lane d loads G{x,y,z}[i]; broadcast so every lane holds all of G[0..dim) */
   if (lane_id < dim)
   {
      t = read_only_load(Gdata[lane_id] + i);
   }
   for (HYPRE_Int d = 0; d < dim; d++)
   {
      G[d] = __shfl_sync(HYPRE_WARP_FULL_MASK, t, d);
   }
   for (j = istart + lane_id; j < iend; j += HYPRE_WARP_SIZE)
   {
      const HYPRE_Real v = data_in ? fabs(read_only_load(&data_in[j])) * 0.5 : 1.0;
      const HYPRE_Int k = j * dim;
      for (HYPRE_Int d = 0; d < dim; d++)
      {
         data_out[k + d] = v * G[d];
      }
   }
}
#endif
/* Construct the interpolation matrix Pi = [Pi_x, Pi_y, Pi_z].  Each of the
   dim column-blocks (interleaved) has the sparsity pattern of G, with entry
   (i,j) of block d equal to 0.5*|G_ij| * G{x,y,z}_data[i].

   @param A       edge-element matrix (unused here; kept for interface symmetry)
   @param G       discrete gradient matrix
   @param Gx,Gy,Gz coordinate/constant vectors (Gy needed iff dim >= 2, Gz iff dim == 3)
   @param dim     problem dimension (1, 2 or 3)
   @param Pi_ptr  output: the newly created Pi matrix (caller owns it)
   @return hypre_error_flag */
HYPRE_Int hypre_AMSComputePi(hypre_ParCSRMatrix *A,
                             hypre_ParCSRMatrix *G,
                             hypre_ParVector *Gx,
                             hypre_ParVector *Gy,
                             hypre_ParVector *Gz,
                             HYPRE_Int dim,
                             hypre_ParCSRMatrix **Pi_ptr)
{
   hypre_ParCSRMatrix *Pi;

   /* Compute Pi = [Pi_x, Pi_y, Pi_z] */
   {
      HYPRE_Int i, j, d;

      /* NULL-initialize: the device kernel launches below pass all three
         pointers unconditionally, so Gy_data/Gz_data must not be read as
         indeterminate values when dim < 3 (the kernel itself only
         dereferences the first dim of them). */
      HYPRE_Real *Gx_data = NULL, *Gy_data = NULL, *Gz_data = NULL;

      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      /* Pi has dim interleaved copies of G's columns/nonzeros */
      HYPRE_BigInt global_num_cols = dim * hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts;
      HYPRE_Int col_starts_size;
      HYPRE_Int num_cols_offd = dim * hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = dim * hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = dim * hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
      HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G);

      col_starts_size = 2;
      col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < col_starts_size; i++)
      {
         col_starts[i] = (HYPRE_BigInt)dim * col_starts_G[i];
      }

      Pi = hypre_ParCSRMatrixCreate(comm,
                                    global_num_rows,
                                    global_num_cols,
                                    row_starts,
                                    col_starts,
                                    num_cols_offd,
                                    num_nonzeros_diag,
                                    num_nonzeros_offd);

      hypre_ParCSRMatrixOwnsData(Pi) = 1;
      hypre_ParCSRMatrixInitialize(Pi);

      /* the scratch starts array is not retained by Pi, so free it here */
      hypre_TFree(col_starts, HYPRE_MEMORY_HOST);

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      if (dim >= 2)
      {
         Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      }
      if (dim == 3)
      {
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));
      }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(G),
                                                         hypre_ParCSRMatrixMemoryLocation(Pi) );
#endif

      /* Fill-in the diagonal part */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pi_diag = hypre_ParCSRMatrixDiag(Pi);
         HYPRE_Int *Pi_diag_I = hypre_CSRMatrixI(Pi_diag);
         HYPRE_Int *Pi_diag_J = hypre_CSRMatrixJ(Pi_diag);
         HYPRE_Real *Pi_diag_data = hypre_CSRMatrixData(Pi_diag);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            /* Pi row pointer = dim * G row pointer */
            HYPRE_THRUST_CALL( transform,
                               G_diag_I,
                               G_diag_I + G_diag_nrows + 1,
                               Pi_diag_I,
                               dim * _1 );

            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_diag_nnz, "thread", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy1, gDim, bDim,
                               G_diag_nnz, dim, G_diag_J, Pi_diag_J );

            gDim = hypre_GetDefaultCUDAGridDimension(G_diag_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy2, gDim, bDim,
                               G_diag_nrows, dim, G_diag_I, G_diag_data, Gx_data, Gy_data, Gz_data,
                               Pi_diag_data );
         }
         else
#endif
         {
            for (i = 0; i < G_diag_nrows + 1; i++)
            {
               Pi_diag_I[i] = dim * G_diag_I[i];
            }

            for (i = 0; i < G_diag_nnz; i++)
               for (d = 0; d < dim; d++)
               {
                  Pi_diag_J[dim * i + d] = dim * G_diag_J[i] + d;
               }

            /* interleave the x/y/z entries for each nonzero of G */
            for (i = 0; i < G_diag_nrows; i++)
               for (j = G_diag_I[i]; j < G_diag_I[i + 1]; j++)
               {
                  *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
                  if (dim >= 2)
                  {
                     *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
                  }
                  if (dim == 3)
                  {
                     *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
                  }
               }
         }
      }

      /* Fill-in the off-diagonal part */
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pi_offd = hypre_ParCSRMatrixOffd(Pi);
         HYPRE_Int *Pi_offd_I = hypre_CSRMatrixI(Pi_offd);
         HYPRE_Int *Pi_offd_J = hypre_CSRMatrixJ(Pi_offd);
         HYPRE_Real *Pi_offd_data = hypre_CSRMatrixData(Pi_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pi_cmap = hypre_ParCSRMatrixColMapOffd(Pi);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            /* row pointer only meaningful when there are off-proc columns */
            if (G_offd_ncols)
            {
               HYPRE_THRUST_CALL( transform,
                                  G_offd_I,
                                  G_offd_I + G_offd_nrows + 1,
                                  Pi_offd_I,
                                  dim * _1 );
            }

            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_offd_nnz, "thread", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy1, gDim, bDim,
                               G_offd_nnz, dim, G_offd_J, Pi_offd_J );

            gDim = hypre_GetDefaultCUDAGridDimension(G_offd_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy2, gDim, bDim,
                               G_offd_nrows, dim, G_offd_I, G_offd_data, Gx_data, Gy_data, Gz_data,
                               Pi_offd_data );
         }
         else
#endif
         {
            if (G_offd_ncols)
               for (i = 0; i < G_offd_nrows + 1; i++)
               {
                  Pi_offd_I[i] = dim * G_offd_I[i];
               }

            for (i = 0; i < G_offd_nnz; i++)
               for (d = 0; d < dim; d++)
               {
                  Pi_offd_J[dim * i + d] = dim * G_offd_J[i] + d;
               }

            for (i = 0; i < G_offd_nrows; i++)
               for (j = G_offd_I[i]; j < G_offd_I[i + 1]; j++)
               {
                  *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
                  if (dim >= 2)
                  {
                     *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
                  }
                  if (dim == 3)
                  {
                     *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
                  }
               }
         }

         /* expand the off-processor column map to the interleaved numbering */
         for (i = 0; i < G_offd_ncols; i++)
            for (d = 0; d < dim; d++)
            {
               Pi_cmap[dim * i + d] = (HYPRE_BigInt)dim * G_cmap[i] + (HYPRE_BigInt)d;
            }
      }
   }

   *Pi_ptr = Pi;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSComputePixyz
*
* Construct the components Pix, Piy, Piz of the interpolation matrix Pi,
* which maps the space of vector linear finite elements to the space of
* edge finite elements.
*
* The construction is based on the fact that each component has the same
* sparsity structure as G, and the entries can be computed from the vectors
* Gx, Gy, Gz.
*--------------------------------------------------------------------------*/
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* One warp per row i: fill the values of the component matrices Pix/Piy/Piz,
   which share G's sparsity.  For each nonzero j of G's row i, writes
   data_{x,y,z}_out[j] = 0.5*|data_in[j]| * G{x,y,z}_data[i]
   (the |.|*0.5 factor is replaced by 1.0 when data_in == NULL).
   Only the first dim output/input component pointers are dereferenced. */
__global__ void
hypreCUDAKernel_AMSComputePixyz_copy(HYPRE_Int   nrows,      /* local rows of G */
                                     HYPRE_Int   dim,        /* 1, 2 or 3 */
                                     HYPRE_Int  *i_in,       /* CSR row pointer of G */
                                     HYPRE_Real *data_in,    /* values of G (may be NULL) */
                                     HYPRE_Real *Gx_data,
                                     HYPRE_Real *Gy_data,    /* read only if dim >= 2 */
                                     HYPRE_Real *Gz_data,    /* read only if dim == 3 */
                                     HYPRE_Real *data_x_out, /* values of Pix */
                                     HYPRE_Real *data_y_out, /* values of Piy (dim >= 2) */
                                     HYPRE_Real *data_z_out )/* values of Piz (dim == 3) */
{
   const HYPRE_Int i = hypre_cuda_get_grid_warp_id<1, 1>();

   if (i >= nrows)
   {
      return;
   }

   const HYPRE_Int lane_id = hypre_cuda_get_lane_id<1>();
   HYPRE_Int j, istart, iend;
   HYPRE_Real t, G[3], *Gdata[3], *Odata[3];

   Gdata[0] = Gx_data;
   Gdata[1] = Gy_data;
   Gdata[2] = Gz_data;

   Odata[0] = data_x_out;
   Odata[1] = data_y_out;
   Odata[2] = data_z_out;

   /* lanes 0 and 1 load the row's [start, end) bounds from the row pointer */
   if (lane_id < 2)
   {
      j = read_only_load(i_in + i + lane_id);
   }
   /* broadcast the bounds to the whole warp */
   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend   = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);

   /* lane d loads coordinate component d for this row ... */
   if (lane_id < dim)
   {
      t = read_only_load(Gdata[lane_id] + i);
   }
   /* ... and all lanes collect the components via shuffles */
   for (HYPRE_Int d = 0; d < dim; d++)
   {
      G[d] = __shfl_sync(HYPRE_WARP_FULL_MASK, t, d);
   }

   /* warp-strided sweep over the row's nonzeros */
   for (j = istart + lane_id; j < iend; j += HYPRE_WARP_SIZE)
   {
      const HYPRE_Real v = data_in ? fabs(read_only_load(&data_in[j])) * 0.5 : 1.0;

      for (HYPRE_Int d = 0; d < dim; d++)
      {
         Odata[d][j] = v * G[d];
      }
   }
}
#endif
/* Construct the separate components Pix, Piy, Piz of the interpolation
   matrix Pi.  Each component has exactly G's sparsity pattern; the values
   of component d are 0.5*|G_ij| * G{x,y,z}_data[i].

   @param A       edge-element matrix (unused here; kept for interface symmetry)
   @param G       discrete gradient matrix
   @param Gx,Gy,Gz coordinate/constant vectors (Gy needed iff dim >= 2, Gz iff dim == 3)
   @param dim     problem dimension (1, 2 or 3)
   @param Pix_ptr/Piy_ptr/Piz_ptr outputs; Piy only set when dim >= 2, Piz only when dim == 3
   @return hypre_error_flag */
HYPRE_Int hypre_AMSComputePixyz(hypre_ParCSRMatrix *A,
                                hypre_ParCSRMatrix *G,
                                hypre_ParVector *Gx,
                                hypre_ParVector *Gy,
                                hypre_ParVector *Gz,
                                HYPRE_Int dim,
                                hypre_ParCSRMatrix **Pix_ptr,
                                hypre_ParCSRMatrix **Piy_ptr,
                                hypre_ParCSRMatrix **Piz_ptr)
{
   /* NULL-initialize: Piy/Piz are only created for dim >= 2 / dim == 3, so
      keep them well-defined (and silence maybe-uninitialized warnings). */
   hypre_ParCSRMatrix *Pix = NULL, *Piy = NULL, *Piz = NULL;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(G) );
#endif

   /* Compute Pix, Piy, Piz */
   {
      HYPRE_Int i, j;

      /* NULL-initialize for the same reason: only the first dim of these are
         ever assigned, and NULL is explicitly passed for the unused slots in
         the device kernel launches below. */
      HYPRE_Real *Gx_data = NULL, *Gy_data = NULL, *Gz_data = NULL;

      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(G);
      HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));

      /* each component is structurally a copy of G */
      Pix = hypre_ParCSRMatrixCreate(comm,
                                     global_num_rows,
                                     global_num_cols,
                                     row_starts,
                                     col_starts,
                                     num_cols_offd,
                                     num_nonzeros_diag,
                                     num_nonzeros_offd);
      hypre_ParCSRMatrixOwnsData(Pix) = 1;
      hypre_ParCSRMatrixInitialize(Pix);

      if (dim >= 2)
      {
         Piy = hypre_ParCSRMatrixCreate(comm,
                                        global_num_rows,
                                        global_num_cols,
                                        row_starts,
                                        col_starts,
                                        num_cols_offd,
                                        num_nonzeros_diag,
                                        num_nonzeros_offd);
         hypre_ParCSRMatrixOwnsData(Piy) = 1;
         hypre_ParCSRMatrixInitialize(Piy);
      }

      if (dim == 3)
      {
         Piz = hypre_ParCSRMatrixCreate(comm,
                                        global_num_rows,
                                        global_num_cols,
                                        row_starts,
                                        col_starts,
                                        num_cols_offd,
                                        num_nonzeros_diag,
                                        num_nonzeros_offd);
         hypre_ParCSRMatrixOwnsData(Piz) = 1;
         hypre_ParCSRMatrixInitialize(Piz);
      }

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      if (dim >= 2)
      {
         Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      }
      if (dim == 3)
      {
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));
      }

      /* Fill-in the diagonal part */
      if (dim == 3)
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

         hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
         HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
         HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
         HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);

         hypre_CSRMatrix *Piz_diag = hypre_ParCSRMatrixDiag(Piz);
         HYPRE_Int *Piz_diag_I = hypre_CSRMatrixI(Piz_diag);
         HYPRE_Int *Piz_diag_J = hypre_CSRMatrixJ(Piz_diag);
         HYPRE_Real *Piz_diag_data = hypre_CSRMatrixData(Piz_diag);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            /* copy G's structure into all three components in one pass */
            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_diag_I, G_diag_I, G_diag_I)),
                               G_diag_nrows + 1,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_diag_I, Piy_diag_I, Piz_diag_I)) );

            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_diag_J, G_diag_J, G_diag_J)),
                               G_diag_nnz,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_diag_J, Piy_diag_J, Piz_diag_J)) );

            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_diag_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                               G_diag_nrows, dim, G_diag_I, G_diag_data, Gx_data, Gy_data, Gz_data,
                               Pix_diag_data, Piy_diag_data, Piz_diag_data );
         }
         else
#endif
         {
            for (i = 0; i < G_diag_nrows + 1; i++)
            {
               Pix_diag_I[i] = G_diag_I[i];
               Piy_diag_I[i] = G_diag_I[i];
               Piz_diag_I[i] = G_diag_I[i];
            }

            for (i = 0; i < G_diag_nnz; i++)
            {
               Pix_diag_J[i] = G_diag_J[i];
               Piy_diag_J[i] = G_diag_J[i];
               Piz_diag_J[i] = G_diag_J[i];
            }

            for (i = 0; i < G_diag_nrows; i++)
               for (j = G_diag_I[i]; j < G_diag_I[i + 1]; j++)
               {
                  *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
                  *Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
                  *Piz_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
               }
         }
      }
      else if (dim == 2)
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

         hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
         HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
         HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
         HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_diag_I, G_diag_I)),
                               G_diag_nrows + 1,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_diag_I, Piy_diag_I)) );

            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_diag_J, G_diag_J)),
                               G_diag_nnz,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_diag_J, Piy_diag_J)) );

            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_diag_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                               G_diag_nrows, dim, G_diag_I, G_diag_data, Gx_data, Gy_data, NULL,
                               Pix_diag_data, Piy_diag_data, NULL );
         }
         else
#endif
         {
            for (i = 0; i < G_diag_nrows + 1; i++)
            {
               Pix_diag_I[i] = G_diag_I[i];
               Piy_diag_I[i] = G_diag_I[i];
            }

            for (i = 0; i < G_diag_nnz; i++)
            {
               Pix_diag_J[i] = G_diag_J[i];
               Piy_diag_J[i] = G_diag_J[i];
            }

            for (i = 0; i < G_diag_nrows; i++)
               for (j = G_diag_I[i]; j < G_diag_I[i + 1]; j++)
               {
                  *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
                  *Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
               }
         }
      }
      else
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            HYPRE_THRUST_CALL( copy_n,
                               G_diag_I,
                               G_diag_nrows + 1,
                               Pix_diag_I );

            HYPRE_THRUST_CALL( copy_n,
                               G_diag_J,
                               G_diag_nnz,
                               Pix_diag_J );

            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_diag_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                               G_diag_nrows, dim, G_diag_I, G_diag_data, Gx_data, NULL, NULL,
                               Pix_diag_data, NULL, NULL );
         }
         else
#endif
         {
            for (i = 0; i < G_diag_nrows + 1; i++)
            {
               Pix_diag_I[i] = G_diag_I[i];
            }

            for (i = 0; i < G_diag_nnz; i++)
            {
               Pix_diag_J[i] = G_diag_J[i];
            }

            for (i = 0; i < G_diag_nrows; i++)
               for (j = G_diag_I[i]; j < G_diag_I[i + 1]; j++)
               {
                  *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               }
         }
      }

      /* Fill-in the off-diagonal part */
      if (dim == 3)
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
         HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
         HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
         HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);

         hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
         HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
         HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
         HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);

         hypre_CSRMatrix *Piz_offd = hypre_ParCSRMatrixOffd(Piz);
         HYPRE_Int *Piz_offd_I = hypre_CSRMatrixI(Piz_offd);
         HYPRE_Int *Piz_offd_J = hypre_CSRMatrixJ(Piz_offd);
         HYPRE_Real *Piz_offd_data = hypre_CSRMatrixData(Piz_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
         HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);
         HYPRE_BigInt *Piz_cmap = hypre_ParCSRMatrixColMapOffd(Piz);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            /* row pointer only meaningful when there are off-proc columns */
            if (G_offd_ncols)
            {
               HYPRE_THRUST_CALL( copy_n,
                                  thrust::make_zip_iterator(thrust::make_tuple(G_offd_I, G_offd_I, G_offd_I)),
                                  G_offd_nrows + 1,
                                  thrust::make_zip_iterator(thrust::make_tuple(Pix_offd_I, Piy_offd_I, Piz_offd_I)) );
            }

            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_offd_J, G_offd_J, G_offd_J)),
                               G_offd_nnz,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_offd_J, Piy_offd_J, Piz_offd_J)) );

            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_offd_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                               G_offd_nrows, dim, G_offd_I, G_offd_data, Gx_data, Gy_data, Gz_data,
                               Pix_offd_data, Piy_offd_data, Piz_offd_data );
         }
         else
#endif
         {
            if (G_offd_ncols)
               for (i = 0; i < G_offd_nrows + 1; i++)
               {
                  Pix_offd_I[i] = G_offd_I[i];
                  Piy_offd_I[i] = G_offd_I[i];
                  Piz_offd_I[i] = G_offd_I[i];
               }

            for (i = 0; i < G_offd_nnz; i++)
            {
               Pix_offd_J[i] = G_offd_J[i];
               Piy_offd_J[i] = G_offd_J[i];
               Piz_offd_J[i] = G_offd_J[i];
            }

            for (i = 0; i < G_offd_nrows; i++)
               for (j = G_offd_I[i]; j < G_offd_I[i + 1]; j++)
               {
                  *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
                  *Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
                  *Piz_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
               }
         }

         /* the components keep G's off-processor column map unchanged */
         for (i = 0; i < G_offd_ncols; i++)
         {
            Pix_cmap[i] = G_cmap[i];
            Piy_cmap[i] = G_cmap[i];
            Piz_cmap[i] = G_cmap[i];
         }
      }
      else if (dim == 2)
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
         HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
         HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
         HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);

         hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
         HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
         HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
         HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
         HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            if (G_offd_ncols)
            {
               HYPRE_THRUST_CALL( copy_n,
                                  thrust::make_zip_iterator(thrust::make_tuple(G_offd_I, G_offd_I)),
                                  G_offd_nrows + 1,
                                  thrust::make_zip_iterator(thrust::make_tuple(Pix_offd_I, Piy_offd_I)) );
            }

            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_offd_J, G_offd_J)),
                               G_offd_nnz,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_offd_J, Piy_offd_J)) );

            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_offd_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                               G_offd_nrows, dim, G_offd_I, G_offd_data, Gx_data, Gy_data, NULL,
                               Pix_offd_data, Piy_offd_data, NULL );
         }
         else
#endif
         {
            if (G_offd_ncols)
               for (i = 0; i < G_offd_nrows + 1; i++)
               {
                  Pix_offd_I[i] = G_offd_I[i];
                  Piy_offd_I[i] = G_offd_I[i];
               }

            for (i = 0; i < G_offd_nnz; i++)
            {
               Pix_offd_J[i] = G_offd_J[i];
               Piy_offd_J[i] = G_offd_J[i];
            }

            for (i = 0; i < G_offd_nrows; i++)
               for (j = G_offd_I[i]; j < G_offd_I[i + 1]; j++)
               {
                  *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
                  *Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
               }
         }

         for (i = 0; i < G_offd_ncols; i++)
         {
            Pix_cmap[i] = G_cmap[i];
            Piy_cmap[i] = G_cmap[i];
         }
      }
      else
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
         HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
         HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
         HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            if (G_offd_ncols)
            {
               HYPRE_THRUST_CALL( copy_n,
                                  G_offd_I,
                                  G_offd_nrows + 1,
                                  Pix_offd_I );
            }

            HYPRE_THRUST_CALL( copy_n,
                               G_offd_J,
                               G_offd_nnz,
                               Pix_offd_J );

            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_offd_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                               G_offd_nrows, dim, G_offd_I, G_offd_data, Gx_data, NULL, NULL,
                               Pix_offd_data, NULL, NULL );
         }
         else
#endif
         {
            if (G_offd_ncols)
               for (i = 0; i < G_offd_nrows + 1; i++)
               {
                  Pix_offd_I[i] = G_offd_I[i];
               }

            for (i = 0; i < G_offd_nnz; i++)
            {
               Pix_offd_J[i] = G_offd_J[i];
            }

            for (i = 0; i < G_offd_nrows; i++)
               for (j = G_offd_I[i]; j < G_offd_I[i + 1]; j++)
               {
                  *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               }
         }

         for (i = 0; i < G_offd_ncols; i++)
         {
            Pix_cmap[i] = G_cmap[i];
         }
      }
   }

   *Pix_ptr = Pix;
   if (dim >= 2)
   {
      *Piy_ptr = Piy;
   }
   if (dim == 3)
   {
      *Piz_ptr = Piz;
   }

   return hypre_error_flag;
}
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* One warp per row i: fill the (interleaved) values of GPi's row i.  Here
   dim already includes the extra G column, so for each nonzero j of G's row
   the kernel writes dim entries: the raw G value first, then dim-1 Pi
   entries 0.5*|data_in[j]| * G{x,y,z}_data[i].  Unlike the Pi kernels,
   data_in must be non-NULL (it is read unconditionally). */
__global__ void
hypreCUDAKernel_AMSComputeGPi_copy2(HYPRE_Int   nrows,    /* local rows of G */
                                    HYPRE_Int   dim,      /* problem dim + 1 (G column included) */
                                    HYPRE_Int  *i_in,     /* CSR row pointer of G */
                                    HYPRE_Real *data_in,  /* values of G */
                                    HYPRE_Real *Gx_data,
                                    HYPRE_Real *Gy_data,  /* read only if dim >= 3 */
                                    HYPRE_Real *Gz_data,  /* read only if dim == 4 */
                                    HYPRE_Real *data_out) /* values of GPi (length dim*nnz) */
{
   const HYPRE_Int i = hypre_cuda_get_grid_warp_id<1, 1>();

   if (i >= nrows)
   {
      return;
   }

   const HYPRE_Int lane_id = hypre_cuda_get_lane_id<1>();
   HYPRE_Int j, istart, iend;
   HYPRE_Real t, G[3], *Gdata[3];

   Gdata[0] = Gx_data;
   Gdata[1] = Gy_data;
   Gdata[2] = Gz_data;

   /* lanes 0 and 1 load the row's [start, end) bounds from the row pointer */
   if (lane_id < 2)
   {
      j = read_only_load(i_in + i + lane_id);
   }
   /* broadcast the bounds to the whole warp */
   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend   = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);

   /* only dim-1 coordinate components exist (one slot belongs to G itself) */
   if (lane_id < dim - 1)
   {
      t = read_only_load(Gdata[lane_id] + i);
   }
   for (HYPRE_Int d = 0; d < dim - 1; d++)
   {
      G[d] = __shfl_sync(HYPRE_WARP_FULL_MASK, t, d);
   }

   /* warp-strided sweep over the row's nonzeros */
   for (j = istart + lane_id; j < iend; j += HYPRE_WARP_SIZE)
   {
      const HYPRE_Real u = read_only_load(&data_in[j]);
      const HYPRE_Real v = fabs(u) * 0.5;
      const HYPRE_Int k = j * dim;

      data_out[k] = u;            /* G's own entry comes first */
      for (HYPRE_Int d = 0; d < dim - 1; d++)
      {
         data_out[k + d + 1] = v * G[d];
      }
   }
}
#endif
/*--------------------------------------------------------------------------
* hypre_AMSComputeGPi
*
* Construct the matrix [G,Pi] which can be considered an interpolation
* matrix from S_h^4 (4 copies of the scalar linear finite element space)
* to the edge finite elements space.
*--------------------------------------------------------------------------*/
/* Construct the combined matrix [G, Pi], interpolating from dim+1 copies of
   the scalar linear finite element space to the edge element space.  The
   columns are interleaved per node: G's entry first, then the Pi components.

   @param A       edge-element matrix (unused here; kept for interface symmetry)
   @param G       discrete gradient matrix
   @param Gx,Gy,Gz coordinate/constant vectors (Gy needed iff dim >= 2, Gz iff dim == 3)
   @param dim     problem dimension on input (incremented internally for G's column)
   @param GPi_ptr output: the newly created [G, Pi] matrix (caller owns it)
   @return hypre_error_flag */
HYPRE_Int hypre_AMSComputeGPi(hypre_ParCSRMatrix *A,
                              hypre_ParCSRMatrix *G,
                              hypre_ParVector *Gx,
                              hypre_ParVector *Gy,
                              hypre_ParVector *Gz,
                              HYPRE_Int dim,
                              hypre_ParCSRMatrix **GPi_ptr)
{
   hypre_ParCSRMatrix *GPi;

   /* Take into account G: from here on, dim counts G's column too */
   dim++;

   /* Compute GPi = [G, Pi_x, Pi_y, Pi_z] (the G value is written first
      in each dim-wide slot below) */
   {
      HYPRE_Int i, j, d;

      /* NULL-initialize: the device kernel launch below passes all three
         pointers unconditionally, so Gy_data/Gz_data must not be read as
         indeterminate values when dim < 4. */
      HYPRE_Real *Gx_data = NULL, *Gy_data = NULL, *Gz_data = NULL;

      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      HYPRE_BigInt global_num_cols = dim * hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts;
      HYPRE_Int col_starts_size;
      HYPRE_Int num_cols_offd = dim * hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = dim * hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = dim * hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
      HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G);

      col_starts_size = 2;
      col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < col_starts_size; i++)
      {
         col_starts[i] = (HYPRE_BigInt) dim * col_starts_G[i];
      }

      GPi = hypre_ParCSRMatrixCreate(comm,
                                     global_num_rows,
                                     global_num_cols,
                                     row_starts,
                                     col_starts,
                                     num_cols_offd,
                                     num_nonzeros_diag,
                                     num_nonzeros_offd);

      hypre_ParCSRMatrixOwnsData(GPi) = 1;
      hypre_ParCSRMatrixInitialize(GPi);

      /* the scratch starts array is not retained by GPi: free it here
         (matches hypre_AMSComputePi; previously leaked) */
      hypre_TFree(col_starts, HYPRE_MEMORY_HOST);

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      if (dim >= 3)
      {
         Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      }
      if (dim == 4)
      {
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));
      }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(G),
                                                         hypre_ParCSRMatrixMemoryLocation(GPi) );
#endif

      /* Fill-in the diagonal part */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *GPi_diag = hypre_ParCSRMatrixDiag(GPi);
         HYPRE_Int *GPi_diag_I = hypre_CSRMatrixI(GPi_diag);
         HYPRE_Int *GPi_diag_J = hypre_CSRMatrixJ(GPi_diag);
         HYPRE_Real *GPi_diag_data = hypre_CSRMatrixData(GPi_diag);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            /* GPi row pointer = dim * G row pointer */
            HYPRE_THRUST_CALL( transform,
                               G_diag_I,
                               G_diag_I + G_diag_nrows + 1,
                               GPi_diag_I,
                               dim * _1 );

            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_diag_nnz, "thread", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy1, gDim, bDim,
                               G_diag_nnz, dim, G_diag_J, GPi_diag_J );

            gDim = hypre_GetDefaultCUDAGridDimension(G_diag_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputeGPi_copy2, gDim, bDim,
                               G_diag_nrows, dim, G_diag_I, G_diag_data, Gx_data, Gy_data, Gz_data,
                               GPi_diag_data );
         }
         else
#endif
         {
            for (i = 0; i < G_diag_nrows + 1; i++)
            {
               GPi_diag_I[i] = dim * G_diag_I[i];
            }

            for (i = 0; i < G_diag_nnz; i++)
               for (d = 0; d < dim; d++)
               {
                  GPi_diag_J[dim * i + d] = dim * G_diag_J[i] + d;
               }

            /* G's own entry first, then the Pi components */
            for (i = 0; i < G_diag_nrows; i++)
               for (j = G_diag_I[i]; j < G_diag_I[i + 1]; j++)
               {
                  *GPi_diag_data++ = G_diag_data[j];
                  *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
                  if (dim >= 3)
                  {
                     *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
                  }
                  if (dim == 4)
                  {
                     *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
                  }
               }
         }
      }

      /* Fill-in the off-diagonal part */
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *GPi_offd = hypre_ParCSRMatrixOffd(GPi);
         HYPRE_Int *GPi_offd_I = hypre_CSRMatrixI(GPi_offd);
         HYPRE_Int *GPi_offd_J = hypre_CSRMatrixJ(GPi_offd);
         HYPRE_Real *GPi_offd_data = hypre_CSRMatrixData(GPi_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *GPi_cmap = hypre_ParCSRMatrixColMapOffd(GPi);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            /* row pointer only meaningful when there are off-proc columns */
            if (G_offd_ncols)
            {
               HYPRE_THRUST_CALL( transform,
                                  G_offd_I,
                                  G_offd_I + G_offd_nrows + 1,
                                  GPi_offd_I,
                                  dim * _1 );
            }

            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_offd_nnz, "thread", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy1, gDim, bDim,
                               G_offd_nnz, dim, G_offd_J, GPi_offd_J );

            gDim = hypre_GetDefaultCUDAGridDimension(G_offd_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputeGPi_copy2, gDim, bDim,
                               G_offd_nrows, dim, G_offd_I, G_offd_data, Gx_data, Gy_data, Gz_data,
                               GPi_offd_data );
         }
         else
#endif
         {
            if (G_offd_ncols)
               for (i = 0; i < G_offd_nrows + 1; i++)
               {
                  GPi_offd_I[i] = dim * G_offd_I[i];
               }

            for (i = 0; i < G_offd_nnz; i++)
               for (d = 0; d < dim; d++)
               {
                  GPi_offd_J[dim * i + d] = dim * G_offd_J[i] + d;
               }

            for (i = 0; i < G_offd_nrows; i++)
               for (j = G_offd_I[i]; j < G_offd_I[i + 1]; j++)
               {
                  *GPi_offd_data++ = G_offd_data[j];
                  *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
                  if (dim >= 3)
                  {
                     *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
                  }
                  if (dim == 4)
                  {
                     *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
                  }
               }
         }

         /* expand the off-processor column map to the interleaved numbering
            (cast as in hypre_AMSComputePi to avoid HYPRE_Int overflow) */
         for (i = 0; i < G_offd_ncols; i++)
            for (d = 0; d < dim; d++)
            {
               GPi_cmap[dim * i + d] = (HYPRE_BigInt)dim * G_cmap[i] + (HYPRE_BigInt)d;
            }
      }
   }

   *GPi_ptr = GPi;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetup
*
* Construct the AMS solver components.
*
* The following functions need to be called before hypre_AMSSetup():
* - hypre_AMSSetDimension() (if solving a 2D problem)
* - hypre_AMSSetDiscreteGradient()
 * - hypre_AMSSetCoordinateVectors() or hypre_AMSSetEdgeConstantVectors()
*--------------------------------------------------------------------------*/
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* One warp per row: zero out every entry (diag and offd) of G0t's row i
   whenever interior_nodes_data[i] != 1.0, i.e. keep only the rows that
   correspond to interior (zero-conductivity) nodes. */
__global__ void
hypreCUDAKernel_FixInterNodes( HYPRE_Int      nrows,
                               HYPRE_Int     *G0t_diag_i,
                               HYPRE_Complex *G0t_diag_data,
                               HYPRE_Int     *G0t_offd_i,
                               HYPRE_Complex *G0t_offd_data,      /* NULL when offd has no entries */
                               HYPRE_Real    *interior_nodes_data)
{
   HYPRE_Int row_i = hypre_cuda_get_grid_warp_id<1, 1>();

   if (row_i >= nrows)
   {
      return;
   }

   HYPRE_Int lane = hypre_cuda_get_lane_id<1>();
   HYPRE_Int not1 = 0;

   /* lane 0 tests the node flag, then broadcasts the verdict */
   if (lane == 0)
   {
      not1 = read_only_load(&interior_nodes_data[row_i]) != 1.0;
   }

   not1 = __shfl_sync(HYPRE_WARP_FULL_MASK, not1, 0);

   /* flagged rows (== 1.0) are left untouched */
   if (!not1)
   {
      return;
   }

   HYPRE_Int p1, q1, p2 = 0, q2 = 0;
   bool nonempty_offd = G0t_offd_data != NULL;

   /* lanes 0 and 1 load the row bounds for diag (and offd, if present) */
   if (lane < 2)
   {
      p1 = read_only_load(G0t_diag_i + row_i + lane);
      if (nonempty_offd)
      {
         p2 = read_only_load(G0t_offd_i + row_i + lane);
      }
   }
   q1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 1);
   p1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 0);
   if (nonempty_offd)
   {
      q2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 1);
      p2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 0);
   }

   /* warp-strided zeroing of the row's values */
   for (HYPRE_Int j = p1 + lane; j < q1; j += HYPRE_WARP_SIZE)
   {
      G0t_diag_data[j] = 0.0;
   }

   for (HYPRE_Int j = p2 + lane; j < q2; j += HYPRE_WARP_SIZE)
   {
      G0t_offd_data[j] = 0.0;
   }
}
/* One warp per row of G^t: scale all entries of the row (diag and offd) by
   h2 = mean over the row's diag entries k of Gx[k]^2 + Gy[k]^2 + Gz[k]^2.
   NOTE(review): h2 presumably approximates the squared local mesh size used
   to scale G G^t in the AMS setup — confirm against the AMS references. */
__global__ void
hypreCUDAKernel_AMSSetupScaleGGt( HYPRE_Int   Gt_num_rows,
                                  HYPRE_Int  *Gt_diag_i,
                                  HYPRE_Int  *Gt_diag_j,
                                  HYPRE_Real *Gt_diag_data,
                                  HYPRE_Int  *Gt_offd_i,
                                  HYPRE_Real *Gt_offd_data,  /* NULL when offd has no entries */
                                  HYPRE_Real *Gx_data,
                                  HYPRE_Real *Gy_data,
                                  HYPRE_Real *Gz_data )
{
   HYPRE_Int row_i = hypre_cuda_get_grid_warp_id<1, 1>();

   if (row_i >= Gt_num_rows)
   {
      return;
   }

   HYPRE_Int lane = hypre_cuda_get_lane_id<1>();
   HYPRE_Real h2 = 0.0;
   HYPRE_Int ne, p1, q1, p2 = 0, q2 = 0;

   /* lanes 0 and 1 load the diag row bounds, broadcast to the warp */
   if (lane < 2)
   {
      p1 = read_only_load(Gt_diag_i + row_i + lane);
   }
   q1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 1);
   p1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 0);
   ne = q1 - p1;

   /* empty diag row: nothing to average, leave the row unscaled */
   if (ne == 0)
   {
      return;
   }

   if (Gt_offd_data != NULL)
   {
      if (lane < 2)
      {
         p2 = read_only_load(Gt_offd_i + row_i + lane);
      }
      q2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 1);
      p2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 0);
   }

   /* accumulate the squared coordinate norms of the row's diag columns */
   for (HYPRE_Int j = p1 + lane; j < q1; j += HYPRE_WARP_SIZE)
   {
      const HYPRE_Int k = read_only_load(&Gt_diag_j[j]);
      const HYPRE_Real Gx = read_only_load(&Gx_data[k]);
      const HYPRE_Real Gy = read_only_load(&Gy_data[k]);
      const HYPRE_Real Gz = read_only_load(&Gz_data[k]);

      h2 += Gx * Gx + Gy * Gy + Gz * Gz;
   }

   /* warp-wide sum, then divide by the number of diag entries */
   h2 = warp_allreduce_sum(h2) / ne;

   for (HYPRE_Int j = p1 + lane; j < q1; j += HYPRE_WARP_SIZE)
   {
      Gt_diag_data[j] *= h2;
   }

   for (HYPRE_Int j = p2 + lane; j < q2; j += HYPRE_WARP_SIZE)
   {
      Gt_offd_data[j] *= h2;
   }
}
#endif
/*--------------------------------------------------------------------------
 * hypre_AMSSetup
 *
 * Construct all AMS preconditioner components:
 *   - optional compatible-subspace modifications for problems with
 *     zero-conductivity regions (G0, A_G0 = G0^T G0, B_G0, and the
 *     penalized system A + factor * G0 G0^T);
 *   - l1 norms or Chebyshev eigenvalue estimates for the fine smoother;
 *   - the interpolations Pi (or Pix/Piy/Piz, or the combined [G,Pi]);
 *   - the coarse subspace matrices by RAP and a BoomerAMG solver for each;
 *   - the temporary vectors used by hypre_AMSSolve.
 *
 * @param solver  AMS object created by hypre_AMSCreate (hypre_AMSData*)
 * @param A       edge-element stiffness matrix (stored in ams_data->A)
 * @param b, x    unused here; present for the standard setup signature
 * @return        hypre_error_flag
 *
 * Fixes relative to the previous revision:
 *   - the '#endif' guarding the device branch of the GGt product was placed
 *     BEFORE its 'else', which left a dangling 'else' (compile error) in
 *     host-only builds; it now follows the same pattern as every other
 *     device/host branch in this function;
 *   - the shadow declaration of 'A' for the G0 G0^T product is hoisted out
 *     of the '#if' so host-only builds no longer reuse the function
 *     argument 'A' as scratch storage;
 *   - the unused rhs/solution arguments of HYPRE_BoomerAMGSetup are passed
 *     as NULL instead of 0, consistent with the other Setup calls below.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetup(void *solver,
                         hypre_ParCSRMatrix *A,
                         hypre_ParVector *b,
                         hypre_ParVector *x)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );
#endif
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   HYPRE_Int input_info = 0;
   ams_data -> A = A;
   /* Modifications for problems with zero-conductivity regions */
   if (ams_data -> interior_nodes)
   {
      hypre_ParCSRMatrix *G0t, *Aorig = A;
      /* Make sure that multiple Setup()+Solve() give identical results */
      ams_data -> solve_counter = 0;
      /* Construct the discrete gradient matrix for the zero-conductivity region
         by eliminating the zero-conductivity nodes from G^t. The range of G0
         represents the kernel of A, i.e. the gradients of nodal basis functions
         supported in zero-conductivity regions. */
      hypre_ParCSRMatrixTranspose(ams_data -> G, &G0t, 1);
      {
         HYPRE_Int i, j;
         HYPRE_Int nv = hypre_ParCSRMatrixNumCols(ams_data -> G);
         hypre_CSRMatrix *G0td = hypre_ParCSRMatrixDiag(G0t);
         HYPRE_Int *G0tdI = hypre_CSRMatrixI(G0td);
         HYPRE_Real *G0tdA = hypre_CSRMatrixData(G0td);
         hypre_CSRMatrix *G0to = hypre_ParCSRMatrixOffd(G0t);
         HYPRE_Int *G0toI = hypre_CSRMatrixI(G0to);
         HYPRE_Real *G0toA = hypre_CSRMatrixData(G0to);
         HYPRE_Real *interior_nodes_data = hypre_VectorData(
            hypre_ParVectorLocalVector((hypre_ParVector*) ams_data -> interior_nodes));
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(nv, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_FixInterNodes, gDim, bDim,
                               nv, G0tdI, G0tdA, G0toI, G0toA, interior_nodes_data );
         }
         else
#endif
         {
            /* zero out the rows of G^t that belong to non-interior nodes */
            for (i = 0; i < nv; i++)
            {
               if (interior_nodes_data[i] != 1)
               {
                  for (j = G0tdI[i]; j < G0tdI[i + 1]; j++)
                  {
                     G0tdA[j] = 0.0;
                  }
                  if (G0toI)
                     for (j = G0toI[i]; j < G0toI[i + 1]; j++)
                     {
                        G0toA[j] = 0.0;
                     }
               }
            }
         }
      }
      hypre_ParCSRMatrixTranspose(G0t, & ams_data -> G0, 1);
      /* Construct the subspace matrix A_G0 = G0^T G0 */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      if (exec == HYPRE_EXEC_DEVICE)
      {
         ams_data -> A_G0 = hypre_ParCSRMatMat(G0t, ams_data -> G0);
      }
      else
#endif
      {
         ams_data -> A_G0 = hypre_ParMatmul(G0t, ams_data -> G0);
      }
      hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G0);
      /* Create AMG solver for A_G0 */
      HYPRE_BoomerAMGCreate(&ams_data -> B_G0);
      HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G0, ams_data -> B_G_coarsen_type);
      HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G0, ams_data -> B_G_agg_levels);
      HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G0, ams_data -> B_G_relax_type);
      HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G0, 1);
      HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G0, 25);
      HYPRE_BoomerAMGSetTol(ams_data -> B_G0, 0.0);
      HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G0, 3); /* use just a few V-cycles */
      HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G0, ams_data -> B_G_theta);
      HYPRE_BoomerAMGSetInterpType(ams_data -> B_G0, ams_data -> B_G_interp_type);
      HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G0, ams_data -> B_G_Pmax);
      HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G0, 2); /* don't coarsen to 0 */
      /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
      HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G0, ams_data -> B_G_coarse_relax_type, 3);
      HYPRE_BoomerAMGSetup(ams_data -> B_G0,
                           (HYPRE_ParCSRMatrix)ams_data -> A_G0,
                           NULL, NULL);
      /* Construct the preconditioner for ams_data->A = A + G0 G0^T.
         NOTE: this can be optimized significantly by taking into account that
         the sparsity pattern of A is subset of the sparsity pattern of G0 G0^T */
      {
         /* Shadow declaration is intentionally OUTSIDE the device guard so
            that host-only builds do not assign into the function argument A. */
         hypre_ParCSRMatrix *A;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            A = hypre_ParCSRMatMat(ams_data -> G0, G0t);
         }
         else
#endif
         {
            A = hypre_ParMatmul(ams_data -> G0, G0t);
         }
         hypre_ParCSRMatrix *B = Aorig;
         hypre_ParCSRMatrix **C_ptr = &ams_data -> A;
         hypre_ParCSRMatrix *C;
         HYPRE_Real factor, lfactor;
         /* scale (penalize) G0 G0^T before adding it to the matrix */
         {
            HYPRE_Int i;
            HYPRE_Int B_num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(B));
            HYPRE_Real *B_diag_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(B));
            HYPRE_Real *B_offd_data = hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(B));
            HYPRE_Int *B_diag_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(B));
            HYPRE_Int *B_offd_i = hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(B));
            lfactor = -1;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
            if (exec == HYPRE_EXEC_DEVICE)
            {
               HYPRE_Int nnz_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B));
               HYPRE_Int nnz_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B));
#if defined(HYPRE_DEBUG)
               HYPRE_Int nnz;
               hypre_TMemcpy(&nnz, &B_diag_i[B_num_rows], HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
               hypre_assert(nnz == nnz_diag);
               hypre_TMemcpy(&nnz, &B_offd_i[B_num_rows], HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
               hypre_assert(nnz == nnz_offd);
#endif
               /* device reduction: lfactor = max|B_ij| over diag and offd */
               if (nnz_diag)
               {
                  lfactor = HYPRE_THRUST_CALL( reduce,
                                               thrust::make_transform_iterator(B_diag_data, absolute_value<HYPRE_Real>()),
                                               thrust::make_transform_iterator(B_diag_data + nnz_diag, absolute_value<HYPRE_Real>()),
                                               -1.0,
                                               thrust::maximum<HYPRE_Real>() );
               }
               if (nnz_offd)
               {
                  lfactor = HYPRE_THRUST_CALL( reduce,
                                               thrust::make_transform_iterator(B_offd_data, absolute_value<HYPRE_Real>()),
                                               thrust::make_transform_iterator(B_offd_data + nnz_offd, absolute_value<HYPRE_Real>()),
                                               lfactor,
                                               thrust::maximum<HYPRE_Real>() );
               }
            }
            else
#endif
            {
               for (i = 0; i < B_diag_i[B_num_rows]; i++)
                  if (fabs(B_diag_data[i]) > lfactor)
                  {
                     lfactor = fabs(B_diag_data[i]);
                  }
               for (i = 0; i < B_offd_i[B_num_rows]; i++)
                  if (fabs(B_offd_data[i]) > lfactor)
                  {
                     lfactor = fabs(B_offd_data[i]);
                  }
            }
            lfactor *= 1e-10; /* scaling factor: max|A_ij|*1e-10 */
            hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX,
                                hypre_ParCSRMatrixComm(A));
         }
         hypre_ParCSRMatrixAdd(factor, A, 1.0, B, &C);
         /*hypre_CSRMatrix *A_local, *B_local, *C_local, *C_tmp;
         MPI_Comm comm = hypre_ParCSRMatrixComm(A);
         HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
         HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
         HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
         HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);
         HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
         HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A));
         HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A));
         HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B));
         HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B));
         HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B));
         A_local = hypre_MergeDiagAndOffd(A);
         B_local = hypre_MergeDiagAndOffd(B);*/
         /* scale (penalize) G0 G0^T before adding it to the matrix */
         /*{
            HYPRE_Int i, nnz = hypre_CSRMatrixNumNonzeros(A_local);
            HYPRE_Real *data = hypre_CSRMatrixData(A_local);
            HYPRE_Real *dataB = hypre_CSRMatrixData(B_local);
            HYPRE_Int nnzB = hypre_CSRMatrixNumNonzeros(B_local);
            HYPRE_Real factor, lfactor;
            lfactor = -1;
            for (i = 0; i < nnzB; i++)
               if (fabs(dataB[i]) > lfactor)
                  lfactor = fabs(dataB[i]);
            lfactor *= 1e-10;
            hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX,
                                hypre_ParCSRMatrixComm(A));
            for (i = 0; i < nnz; i++)
               data[i] *= factor;
         }
         C_tmp = hypre_CSRMatrixBigAdd(A_local, B_local);
         C_local = hypre_CSRMatrixBigDeleteZeros(C_tmp,0.0);
         if (C_local)
            hypre_CSRMatrixDestroy(C_tmp);
         else
            C_local = C_tmp;
         C = hypre_ParCSRMatrixCreate (comm,
                                       global_num_rows,
                                       global_num_cols,
                                       row_starts,
                                       col_starts,
                                       A_num_cols_offd + B_num_cols_offd,
                                       A_num_nonzeros_diag + B_num_nonzeros_diag,
                                       A_num_nonzeros_offd + B_num_nonzeros_offd);
         GenerateDiagAndOffd(C_local, C,
                             hypre_ParCSRMatrixFirstColDiag(A),
                             hypre_ParCSRMatrixLastColDiag(A));
         hypre_CSRMatrixDestroy(A_local);
         hypre_CSRMatrixDestroy(B_local);
         hypre_CSRMatrixDestroy(C_local);
         */
         hypre_ParCSRMatrixDestroy(A);
         *C_ptr = C;
      }
      hypre_ParCSRMatrixDestroy(G0t);
   }
   /* Make sure that the first entry in each row is the diagonal one. */
   /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(ams_data -> A)); */
   /* Compute the l1 norm of the rows of A */
   if (ams_data -> A_relax_type >= 1 && ams_data -> A_relax_type <= 4)
   {
      HYPRE_Real *l1_norm_data = NULL;
      hypre_ParCSRComputeL1Norms(ams_data -> A, ams_data -> A_relax_type, NULL, &l1_norm_data);
      ams_data -> A_l1_norms = hypre_SeqVectorCreate(hypre_ParCSRMatrixNumRows(ams_data -> A));
      hypre_VectorData(ams_data -> A_l1_norms) = l1_norm_data;
      hypre_SeqVectorInitialize_v2(ams_data -> A_l1_norms,
                                   hypre_ParCSRMatrixMemoryLocation(ams_data -> A));
   }
   /* Chebyshev? */
   if (ams_data -> A_relax_type == 16)
   {
      hypre_ParCSRMaxEigEstimateCG(ams_data->A, 1, 10,
                                   &ams_data->A_max_eig_est,
                                   &ams_data->A_min_eig_est);
   }
   /* If not given, compute Gx, Gy and Gz */
   {
      /* input_info == 1: nodal coordinates were given; == 2: Gx/Gy/Gz given */
      if (ams_data -> x != NULL &&
          (ams_data -> dim == 1 || ams_data -> y != NULL) &&
          (ams_data -> dim <= 2 || ams_data -> z != NULL))
      {
         input_info = 1;
      }
      if (ams_data -> Gx != NULL &&
          (ams_data -> dim == 1 || ams_data -> Gy != NULL) &&
          (ams_data -> dim <= 2 || ams_data -> Gz != NULL))
      {
         input_info = 2;
      }
      if (input_info == 1)
      {
         ams_data -> Gx = hypre_ParVectorInRangeOf(ams_data -> G);
         hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> x, 0.0, ams_data -> Gx);
         if (ams_data -> dim >= 2)
         {
            ams_data -> Gy = hypre_ParVectorInRangeOf(ams_data -> G);
            hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> y, 0.0, ams_data -> Gy);
         }
         if (ams_data -> dim == 3)
         {
            ams_data -> Gz = hypre_ParVectorInRangeOf(ams_data -> G);
            hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> z, 0.0, ams_data -> Gz);
         }
      }
   }
   if (ams_data -> Pi == NULL && ams_data -> Pix == NULL)
   {
      if (ams_data -> cycle_type == 20)
         /* Construct the combined interpolation matrix [G,Pi] */
         hypre_AMSComputeGPi(ams_data -> A,
                             ams_data -> G,
                             ams_data -> Gx,
                             ams_data -> Gy,
                             ams_data -> Gz,
                             ams_data -> dim,
                             &ams_data -> Pi);
      else if (ams_data -> cycle_type > 10)
         /* Construct Pi{x,y,z} instead of Pi = [Pix,Piy,Piz] */
         hypre_AMSComputePixyz(ams_data -> A,
                               ams_data -> G,
                               ams_data -> Gx,
                               ams_data -> Gy,
                               ams_data -> Gz,
                               ams_data -> dim,
                               &ams_data -> Pix,
                               &ams_data -> Piy,
                               &ams_data -> Piz);
      else
         /* Construct the Pi interpolation matrix */
         hypre_AMSComputePi(ams_data -> A,
                            ams_data -> G,
                            ams_data -> Gx,
                            ams_data -> Gy,
                            ams_data -> Gz,
                            ams_data -> dim,
                            &ams_data -> Pi);
   }
   /* Keep Gx, Gy and Gz only if use the method with discrete divergence
      stabilization (where we use them to compute the local mesh size). */
   if (input_info == 1 && ams_data -> cycle_type != 9)
   {
      hypre_ParVectorDestroy(ams_data -> Gx);
      if (ams_data -> dim >= 2)
      {
         hypre_ParVectorDestroy(ams_data -> Gy);
      }
      if (ams_data -> dim == 3)
      {
         hypre_ParVectorDestroy(ams_data -> Gz);
      }
   }
   /* Create the AMG solver on the range of G^T */
   if (!ams_data -> beta_is_zero && ams_data -> cycle_type != 20)
   {
      HYPRE_BoomerAMGCreate(&ams_data -> B_G);
      HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G, ams_data -> B_G_coarsen_type);
      HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G, ams_data -> B_G_agg_levels);
      HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G, ams_data -> B_G_relax_type);
      HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G, 1);
      HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 25);
      HYPRE_BoomerAMGSetTol(ams_data -> B_G, 0.0);
      HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G, 1);
      HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G, ams_data -> B_G_theta);
      HYPRE_BoomerAMGSetInterpType(ams_data -> B_G, ams_data -> B_G_interp_type);
      HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G, ams_data -> B_G_Pmax);
      HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G, 2); /* don't coarsen to 0 */
      /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
      HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G, ams_data -> B_G_coarse_relax_type, 3);
      if (ams_data -> cycle_type == 0)
      {
         HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 2);
      }
      /* If not given, construct the coarse space matrix by RAP */
      if (!ams_data -> A_G)
      {
         if (!hypre_ParCSRMatrixCommPkg(ams_data -> G))
         {
            hypre_MatvecCommPkgCreate(ams_data -> G);
         }
         if (!hypre_ParCSRMatrixCommPkg(ams_data -> A))
         {
            hypre_MatvecCommPkgCreate(ams_data -> A);
         }
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            ams_data -> A_G = hypre_ParCSRMatrixRAPKT(ams_data -> G,
                                                      ams_data -> A,
                                                      ams_data -> G, 1);
         }
         else
#endif
         {
            hypre_BoomerAMGBuildCoarseOperator(ams_data -> G,
                                               ams_data -> A,
                                               ams_data -> G,
                                               &ams_data -> A_G);
         }
         /* Make sure that A_G has no zero rows (this can happen
            if beta is zero in part of the domain). */
         hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G);
         ams_data -> owns_A_G = 1;
      }
      HYPRE_BoomerAMGSetup(ams_data -> B_G,
                           (HYPRE_ParCSRMatrix)ams_data -> A_G,
                           NULL, NULL);
   }
   if (ams_data -> cycle_type > 10 && ams_data -> cycle_type != 20)
      /* Create the AMG solvers on the range of Pi{x,y,z}^T */
   {
      HYPRE_BoomerAMGCreate(&ams_data -> B_Pix);
      HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pix, ams_data -> B_Pi_coarsen_type);
      HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pix, ams_data -> B_Pi_agg_levels);
      HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_relax_type);
      HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pix, 1);
      HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 25);
      HYPRE_BoomerAMGSetTol(ams_data -> B_Pix, 0.0);
      HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pix, 1);
      HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pix, ams_data -> B_Pi_theta);
      HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pix, ams_data -> B_Pi_interp_type);
      HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pix, ams_data -> B_Pi_Pmax);
      HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pix, 2);
      HYPRE_BoomerAMGCreate(&ams_data -> B_Piy);
      HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piy, ams_data -> B_Pi_coarsen_type);
      HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piy, ams_data -> B_Pi_agg_levels);
      HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_relax_type);
      HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piy, 1);
      HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 25);
      HYPRE_BoomerAMGSetTol(ams_data -> B_Piy, 0.0);
      HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piy, 1);
      HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piy, ams_data -> B_Pi_theta);
      HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piy, ams_data -> B_Pi_interp_type);
      HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piy, ams_data -> B_Pi_Pmax);
      HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piy, 2);
      HYPRE_BoomerAMGCreate(&ams_data -> B_Piz);
      HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piz, ams_data -> B_Pi_coarsen_type);
      HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piz, ams_data -> B_Pi_agg_levels);
      HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_relax_type);
      HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piz, 1);
      HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 25);
      HYPRE_BoomerAMGSetTol(ams_data -> B_Piz, 0.0);
      HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piz, 1);
      HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piz, ams_data -> B_Pi_theta);
      HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piz, ams_data -> B_Pi_interp_type);
      HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piz, ams_data -> B_Pi_Pmax);
      HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piz, 2);
      /* Generally, don't use exact solve on the coarsest level (matrices may be singular) */
      HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_coarse_relax_type, 3);
      HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_coarse_relax_type, 3);
      HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_coarse_relax_type, 3);
      if (ams_data -> cycle_type == 0)
      {
         HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 2);
         HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 2);
         HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 2);
      }
      /* Construct the coarse space matrices by RAP */
      if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pix))
      {
         hypre_MatvecCommPkgCreate(ams_data -> Pix);
      }
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      if (exec == HYPRE_EXEC_DEVICE)
      {
         ams_data -> A_Pix = hypre_ParCSRMatrixRAPKT(ams_data -> Pix, ams_data -> A, ams_data -> Pix, 1);
      }
      else
#endif
      {
         hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pix,
                                            ams_data -> A,
                                            ams_data -> Pix,
                                            &ams_data -> A_Pix);
      }
      /* Make sure that A_Pix has no zero rows (this can happen
         for some kinds of boundary conditions with contact). */
      hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pix);
      HYPRE_BoomerAMGSetup(ams_data -> B_Pix,
                           (HYPRE_ParCSRMatrix)ams_data -> A_Pix,
                           NULL, NULL);
      if (ams_data -> Piy)
      {
         if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piy))
         {
            hypre_MatvecCommPkgCreate(ams_data -> Piy);
         }
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            ams_data -> A_Piy = hypre_ParCSRMatrixRAPKT(ams_data -> Piy,
                                                        ams_data -> A,
                                                        ams_data -> Piy, 1);
         }
         else
#endif
         {
            hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piy,
                                               ams_data -> A,
                                               ams_data -> Piy,
                                               &ams_data -> A_Piy);
         }
         /* Make sure that A_Piy has no zero rows (this can happen
            for some kinds of boundary conditions with contact). */
         hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piy);
         HYPRE_BoomerAMGSetup(ams_data -> B_Piy,
                              (HYPRE_ParCSRMatrix)ams_data -> A_Piy,
                              NULL, NULL);
      }
      if (ams_data -> Piz)
      {
         if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piz))
         {
            hypre_MatvecCommPkgCreate(ams_data -> Piz);
         }
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            ams_data -> A_Piz = hypre_ParCSRMatrixRAPKT(ams_data -> Piz,
                                                        ams_data -> A,
                                                        ams_data -> Piz, 1);
         }
         else
#endif
         {
            hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piz,
                                               ams_data -> A,
                                               ams_data -> Piz,
                                               &ams_data -> A_Piz);
         }
         /* Make sure that A_Piz has no zero rows (this can happen
            for some kinds of boundary conditions with contact). */
         hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piz);
         HYPRE_BoomerAMGSetup(ams_data -> B_Piz,
                              (HYPRE_ParCSRMatrix)ams_data -> A_Piz,
                              NULL, NULL);
      }
   }
   else
      /* Create the AMG solver on the range of Pi^T */
   {
      HYPRE_BoomerAMGCreate(&ams_data -> B_Pi);
      HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pi, ams_data -> B_Pi_coarsen_type);
      HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pi, ams_data -> B_Pi_agg_levels);
      HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_relax_type);
      HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pi, 1);
      HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 25);
      HYPRE_BoomerAMGSetTol(ams_data -> B_Pi, 0.0);
      HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pi, 1);
      HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pi, ams_data -> B_Pi_theta);
      HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pi, ams_data -> B_Pi_interp_type);
      HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pi, ams_data -> B_Pi_Pmax);
      HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pi, 2); /* don't coarsen to 0 */
      /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
      HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_coarse_relax_type, 3);
      if (ams_data -> cycle_type == 0)
      {
         HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 2);
      }
      /* If not given, construct the coarse space matrix by RAP and
         notify BoomerAMG that this is a dim x dim block system. */
      if (!ams_data -> A_Pi)
      {
         if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pi))
         {
            hypre_MatvecCommPkgCreate(ams_data -> Pi);
         }
         if (!hypre_ParCSRMatrixCommPkg(ams_data -> A))
         {
            hypre_MatvecCommPkgCreate(ams_data -> A);
         }
         if (ams_data -> cycle_type == 9)
         {
            /* Add a discrete divergence term to A before computing Pi^t A Pi */
            {
               hypre_ParCSRMatrix *Gt, *GGt, *ApGGt;
               hypre_ParCSRMatrixTranspose(ams_data -> G, &Gt, 1);
               /* scale GGt by h^2 */
               {
                  HYPRE_Real h2;
                  HYPRE_Int i, j, k, ne;
                  hypre_CSRMatrix *Gt_diag = hypre_ParCSRMatrixDiag(Gt);
                  HYPRE_Int Gt_num_rows = hypre_CSRMatrixNumRows(Gt_diag);
                  HYPRE_Int *Gt_diag_I = hypre_CSRMatrixI(Gt_diag);
                  HYPRE_Int *Gt_diag_J = hypre_CSRMatrixJ(Gt_diag);
                  HYPRE_Real *Gt_diag_data = hypre_CSRMatrixData(Gt_diag);
                  hypre_CSRMatrix *Gt_offd = hypre_ParCSRMatrixOffd(Gt);
                  HYPRE_Int *Gt_offd_I = hypre_CSRMatrixI(Gt_offd);
                  HYPRE_Real *Gt_offd_data = hypre_CSRMatrixData(Gt_offd);
                  HYPRE_Real *Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gx));
                  HYPRE_Real *Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gy));
                  HYPRE_Real *Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gz));
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
                  if (exec == HYPRE_EXEC_DEVICE)
                  {
                     dim3 bDim = hypre_GetDefaultCUDABlockDimension();
                     dim3 gDim = hypre_GetDefaultCUDAGridDimension(Gt_num_rows, "warp", bDim);
                     HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSSetupScaleGGt, gDim, bDim,
                                        Gt_num_rows, Gt_diag_I, Gt_diag_J, Gt_diag_data, Gt_offd_I, Gt_offd_data,
                                        Gx_data, Gy_data, Gz_data );
                  }
                  else
#endif
                  {
                     for (i = 0; i < Gt_num_rows; i++)
                     {
                        /* determine the characteristic mesh size for vertex i */
                        h2 = 0.0;
                        ne = 0;
                        for (j = Gt_diag_I[i]; j < Gt_diag_I[i + 1]; j++)
                        {
                           k = Gt_diag_J[j];
                           h2 += Gx_data[k] * Gx_data[k] + Gy_data[k] * Gy_data[k] + Gz_data[k] * Gz_data[k];
                           ne++;
                        }
                        if (ne != 0)
                        {
                           h2 /= ne;
                           for (j = Gt_diag_I[i]; j < Gt_diag_I[i + 1]; j++)
                           {
                              Gt_diag_data[j] *= h2;
                           }
                           for (j = Gt_offd_I[i]; j < Gt_offd_I[i + 1]; j++)
                           {
                              Gt_offd_data[j] *= h2;
                           }
                        }
                     }
                  }
               }
               /* we only needed Gx, Gy and Gz to compute the local mesh size */
               if (input_info == 1)
               {
                  hypre_ParVectorDestroy(ams_data -> Gx);
                  if (ams_data -> dim >= 2)
                  {
                     hypre_ParVectorDestroy(ams_data -> Gy);
                  }
                  if (ams_data -> dim == 3)
                  {
                     hypre_ParVectorDestroy(ams_data -> Gz);
                  }
               }
               /* BUGFIX: the '#endif' used to come before the 'else', which
                  left a dangling 'else' (compile error) in host-only builds.
                  The guard now matches the device/host pattern used above. */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
               if (exec == HYPRE_EXEC_DEVICE)
               {
                  GGt = hypre_ParCSRMatMat(ams_data -> G, Gt);
               }
               else
#endif
               {
                  GGt = hypre_ParMatmul(ams_data -> G, Gt);
               }
               hypre_ParCSRMatrixDestroy(Gt);
               /* hypre_ParCSRMatrixAdd(GGt, A, &ams_data -> A); */
               hypre_ParCSRMatrixAdd(1.0, GGt, 1.0, ams_data -> A, &ApGGt);
               /*{
                  hypre_ParCSRMatrix *A = GGt;
                  hypre_ParCSRMatrix *B = ams_data -> A;
                  hypre_ParCSRMatrix **C_ptr = &ApGGt;
                  hypre_ParCSRMatrix *C;
                  hypre_CSRMatrix *A_local, *B_local, *C_local;
                  MPI_Comm comm = hypre_ParCSRMatrixComm(A);
                  HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
                  HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
                  HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
                  HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);
                  HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
                  HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A));
                  HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A));
                  HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B));
                  HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B));
                  HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B));
                  A_local = hypre_MergeDiagAndOffd(A);
                  B_local = hypre_MergeDiagAndOffd(B);
                  C_local = hypre_CSRMatrixBigAdd(A_local, B_local);
                  hypre_CSRMatrixBigJtoJ(C_local);
                  C = hypre_ParCSRMatrixCreate (comm,
                                                global_num_rows,
                                                global_num_cols,
                                                row_starts,
                                                col_starts,
                                                A_num_cols_offd + B_num_cols_offd,
                                                A_num_nonzeros_diag + B_num_nonzeros_diag,
                                                A_num_nonzeros_offd + B_num_nonzeros_offd);
                  GenerateDiagAndOffd(C_local, C,
                                      hypre_ParCSRMatrixFirstColDiag(A),
                                      hypre_ParCSRMatrixLastColDiag(A));
                  hypre_CSRMatrixDestroy(A_local);
                  hypre_CSRMatrixDestroy(B_local);
                  hypre_CSRMatrixDestroy(C_local);
                  *C_ptr = C;
               }*/
               hypre_ParCSRMatrixDestroy(GGt);
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
               if (exec == HYPRE_EXEC_DEVICE)
               {
                  ams_data -> A_Pi = hypre_ParCSRMatrixRAPKT(ams_data -> Pi, ApGGt, ams_data -> Pi, 1);
               }
               else
#endif
               {
                  hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi,
                                                     ApGGt,
                                                     ams_data -> Pi,
                                                     &ams_data -> A_Pi);
               }
            }
         }
         else
         {
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
            if (exec == HYPRE_EXEC_DEVICE)
            {
               ams_data -> A_Pi = hypre_ParCSRMatrixRAPKT(ams_data -> Pi, ams_data -> A, ams_data -> Pi, 1);
            }
            else
#endif
            {
               hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi,
                                                  ams_data -> A,
                                                  ams_data -> Pi,
                                                  &ams_data -> A_Pi);
            }
         }
         ams_data -> owns_A_Pi = 1;
         /* [G,Pi] (cycle_type == 20) carries one extra "function" for G */
         if (ams_data -> cycle_type != 20)
         {
            HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim);
         }
         else
         {
            HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim + 1);
         }
         /* HYPRE_BoomerAMGSetNodal(ams_data -> B_Pi, 1); */
      }
      /* Make sure that A_Pi has no zero rows (this can happen for
         some kinds of boundary conditions with contact). */
      hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pi);
      HYPRE_BoomerAMGSetup(ams_data -> B_Pi,
                           (HYPRE_ParCSRMatrix)ams_data -> A_Pi,
                           NULL, NULL);
   }
   /* Allocate temporary vectors */
   ams_data -> r0 = hypre_ParVectorInRangeOf(ams_data -> A);
   ams_data -> g0 = hypre_ParVectorInRangeOf(ams_data -> A);
   if (ams_data -> A_G)
   {
      ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
      ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
   }
   if (ams_data -> r1 == NULL && ams_data -> A_Pix)
   {
      ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
      ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
   }
   if (ams_data -> Pi)
   {
      ams_data -> r2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
      ams_data -> g2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSolve
*
* Solve the system A x = b.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSolve(void *solver,
                         hypre_ParCSRMatrix *A,
                         hypre_ParVector *b,
                         hypre_ParVector *x)
{
   /* Run up to ams_data->maxit AMS cycles on A x = b.  Each cycle applies
      hypre_ParCSRSubspacePrec with a cycle string selected from the tables
      below; residual norms are tracked only when maxit > 1. */
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   HYPRE_Int i, my_id = -1;
   /* r0_norm/r_norm/b_norm are set (and old_resid used) only when maxit > 1 */
   HYPRE_Real r0_norm, r_norm, b_norm, relative_resid = 0, old_resid;
   char cycle[30];
   /* Subspace tables indexed by the cycle-string digits '1'..'5':
      [0] = G (scalar potential), [1] = Pi (vector nodal),
      [2..4] = Pix/Piy/Piz components.  Unused entries may be NULL and are
      skipped by hypre_ParCSRSubspacePrec. */
   hypre_ParCSRMatrix *Ai[5], *Pi[5];
   HYPRE_Solver Bi[5];
   HYPRE_PtrToSolverFcn HBi[5];
   hypre_ParVector *ri[5], *gi[5];
   HYPRE_Int needZ = 0;
   hypre_ParVector *z = ams_data -> zz;
   Ai[0] = ams_data -> A_G;   Pi[0] = ams_data -> G;
   Ai[1] = ams_data -> A_Pi;  Pi[1] = ams_data -> Pi;
   Ai[2] = ams_data -> A_Pix; Pi[2] = ams_data -> Pix;
   Ai[3] = ams_data -> A_Piy; Pi[3] = ams_data -> Piy;
   Ai[4] = ams_data -> A_Piz; Pi[4] = ams_data -> Piz;
   /* subspace [1] is solved as a dim x dim block system */
   Bi[0] = ams_data -> B_G;   HBi[0] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[1] = ams_data -> B_Pi;  HBi[1] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGBlockSolve;
   Bi[2] = ams_data -> B_Pix; HBi[2] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[3] = ams_data -> B_Piy; HBi[3] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[4] = ams_data -> B_Piz; HBi[4] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   /* subspaces 0, 2, 3, 4 share the (r1, g1) work vectors; 1 uses (r2, g2) */
   ri[0] = ams_data -> r1; gi[0] = ams_data -> g1;
   ri[1] = ams_data -> r2; gi[1] = ams_data -> g2;
   ri[2] = ams_data -> r1; gi[2] = ams_data -> g1;
   ri[3] = ams_data -> r1; gi[3] = ams_data -> g1;
   ri[4] = ams_data -> r1; gi[4] = ams_data -> g1;
   /* may need to create an additional temporary vector for relaxation */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );
   if (exec == HYPRE_EXEC_DEVICE)
   {
      needZ = ams_data -> A_relax_type == 2 || ams_data -> A_relax_type == 4 ||
              ams_data -> A_relax_type == 16;
   }
   else
#endif
   {
      needZ = hypre_NumThreads() > 1 || ams_data -> A_relax_type == 16;
   }
   if (needZ && !z)
   {
      /* z is cached in ams_data->zz so later Solve() calls reuse it */
      z = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                hypre_ParCSRMatrixRowStarts(A));
      hypre_ParVectorInitialize(z);
      ams_data -> zz = z;
   }
   if (ams_data -> print_level > 0)
   {
      hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(A), &my_id);
   }
   /* Compatible subspace projection for problems with zero-conductivity regions.
      Note that this modifies the input (r.h.s.) vector b! */
   if ( (ams_data -> B_G0) &&
        (++ams_data->solve_counter % ( ams_data -> projection_frequency ) == 0) )
   {
      /* hypre_printf("Projecting onto the compatible subspace...\n"); */
      hypre_AMSProjectOutGradients(ams_data, b);
   }
   /* Select the cycle string: digits name the subspaces above, '0' is fine
      smoothing, and '(', '+', ')' form additive groups (see
      hypre_ParCSRSubspacePrec).  When beta == 0 the G-subspace (digit '1')
      is omitted. */
   if (ams_data -> beta_is_zero)
   {
      switch (ams_data -> cycle_type)
      {
         case 0:
            hypre_sprintf(cycle, "%s", "0");
            break;
         case 1:
         case 3:
         case 5:
         case 7:
         default:
            hypre_sprintf(cycle, "%s", "020");
            break;
         case 2:
         case 4:
         case 6:
         case 8:
            hypre_sprintf(cycle, "%s", "(0+2)");
            break;
         case 11:
         case 13:
            hypre_sprintf(cycle, "%s", "0345430");
            break;
         case 12:
            hypre_sprintf(cycle, "%s", "(0+3+4+5)");
            break;
         case 14:
            hypre_sprintf(cycle, "%s", "0(+3+4+5)0");
            break;
      }
   }
   else
   {
      switch (ams_data -> cycle_type)
      {
         case 0:
            hypre_sprintf(cycle, "%s", "010");
            break;
         case 1:
         default:
            hypre_sprintf(cycle, "%s", "01210");
            break;
         case 2:
            hypre_sprintf(cycle, "%s", "(0+1+2)");
            break;
         case 3:
            hypre_sprintf(cycle, "%s", "02120");
            break;
         case 4:
            hypre_sprintf(cycle, "%s", "(010+2)");
            break;
         case 5:
            hypre_sprintf(cycle, "%s", "0102010");
            break;
         case 6:
            hypre_sprintf(cycle, "%s", "(020+1)");
            break;
         case 7:
            hypre_sprintf(cycle, "%s", "0201020");
            break;
         case 8:
            hypre_sprintf(cycle, "%s", "0(+1+2)0");
            break;
         case 9:
            hypre_sprintf(cycle, "%s", "01210");
            break;
         case 11:
            hypre_sprintf(cycle, "%s", "013454310");
            break;
         case 12:
            hypre_sprintf(cycle, "%s", "(0+1+3+4+5)");
            break;
         case 13:
            hypre_sprintf(cycle, "%s", "034515430");
            break;
         case 14:
            hypre_sprintf(cycle, "%s", "01(+3+4+5)10");
            break;
         case 20:
            hypre_sprintf(cycle, "%s", "020");
            break;
      }
   }
   for (i = 0; i < ams_data -> maxit; i++)
   {
      /* Compute initial residual norms */
      if (ams_data -> maxit > 1 && i == 0)
      {
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0, ams_data -> r0));
         r0_norm = r_norm;
         b_norm = sqrt(hypre_ParVectorInnerProd(b, b));
         if (b_norm)
         {
            relative_resid = r_norm / b_norm;
         }
         else
         {
            relative_resid = r_norm;
         }
         if (my_id == 0 && ams_data -> print_level > 0)
         {
            hypre_printf(" relative\n");
            hypre_printf(" residual factor residual\n");
            hypre_printf(" -------- ------ --------\n");
            hypre_printf(" Initial %e %e\n",
                         r_norm, relative_resid);
         }
      }
      /* Apply the preconditioner */
      hypre_ParCSRSubspacePrec(ams_data -> A,
                               ams_data -> A_relax_type,
                               ams_data -> A_relax_times,
                               ams_data -> A_l1_norms ? hypre_VectorData(ams_data -> A_l1_norms) : NULL,
                               ams_data -> A_relax_weight,
                               ams_data -> A_omega,
                               ams_data -> A_max_eig_est,
                               ams_data -> A_min_eig_est,
                               ams_data -> A_cheby_order,
                               ams_data -> A_cheby_fraction,
                               Ai, Bi, HBi, Pi, ri, gi,
                               b, x,
                               ams_data -> r0,
                               ams_data -> g0,
                               cycle,
                               z);
      /* Compute new residual norms */
      if (ams_data -> maxit > 1)
      {
         old_resid = r_norm;
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0, ams_data -> r0));
         if (b_norm)
         {
            relative_resid = r_norm / b_norm;
         }
         else
         {
            relative_resid = r_norm;
         }
         if (my_id == 0 && ams_data -> print_level > 0)
            hypre_printf(" Cycle %2d %e %f %e \n",
                         i + 1, r_norm, r_norm / old_resid, relative_resid);
      }
      /* relative_resid stays 0 when maxit == 1, so one cycle is done and
         the loop exits with i incremented for the iteration count */
      if (relative_resid < ams_data -> tol)
      {
         i++;
         break;
      }
   }
   if (my_id == 0 && ams_data -> print_level > 0 && ams_data -> maxit > 1)
      hypre_printf("\n\n Average Convergence Factor = %f\n\n",
                   pow((r_norm / r0_norm), (1.0 / (HYPRE_Real) i)));
   ams_data -> num_iterations = i;
   ams_data -> rel_resid_norm = relative_resid;
   /* reaching maxit with a positive tolerance counts as non-convergence */
   if (ams_data -> num_iterations == ams_data -> maxit && ams_data -> tol > 0.0)
   {
      hypre_error(HYPRE_ERROR_CONV);
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRSubspacePrec
*
* General subspace preconditioner for A0 y = x, based on ParCSR storage.
*
* P[i] and A[i] are the interpolation and coarse grid matrices for
* the (i+1)'th subspace. B[i] is an AMG solver for A[i]. r[i] and g[i]
* are temporary vectors. A0_* are the fine grid smoothing parameters.
*
* The default mode is multiplicative, '+' changes the next correction
* to additive, based on residual computed at '('.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRSubspacePrec(/* fine space matrix */
                                   hypre_ParCSRMatrix *A0,
                                   /* relaxation parameters */
                                   HYPRE_Int A0_relax_type,
                                   HYPRE_Int A0_relax_times,
                                   HYPRE_Real *A0_l1_norms,
                                   HYPRE_Real A0_relax_weight,
                                   HYPRE_Real A0_omega,
                                   HYPRE_Real A0_max_eig_est,
                                   HYPRE_Real A0_min_eig_est,
                                   HYPRE_Int A0_cheby_order,
                                   HYPRE_Real A0_cheby_fraction,
                                   /* subspace matrices */
                                   hypre_ParCSRMatrix **A,
                                   /* subspace preconditioners */
                                   HYPRE_Solver *B,
                                   /* hypre solver functions for B */
                                   HYPRE_PtrToSolverFcn *HB,
                                   /* subspace interpolations */
                                   hypre_ParCSRMatrix **P,
                                   /* temporary subspace vectors */
                                   hypre_ParVector **r,
                                   hypre_ParVector **g,
                                   /* right-hand side */
                                   hypre_ParVector *x,
                                   /* current approximation */
                                   hypre_ParVector *y,
                                   /* current residual */
                                   hypre_ParVector *r0,
                                   /* temporary vector */
                                   hypre_ParVector *g0,
                                   char *cycle,
                                   /* temporary vector */
                                   hypre_ParVector *z)
{
   /* Interpret 'cycle' one character at a time:
        ')'        -- end of an additive group, nothing to do
        '('        -- save the current residual r0 = x - A0 y
        '+'        -- the next correction uses the saved residual (additive)
        '0'        -- fine-space smoothing: y += S (x - A0 y)
        '1'..      -- subspace correction:  y += P[i] B[i]^{-1} P[i]^t r,
                      with i = digit - '1'; empty subspaces are skipped.
      The default mode is multiplicative (residual recomputed per step). */
   char *op;
   HYPRE_Int use_saved_residual = 0;
   for (op = cycle; *op != '\0'; op++)
   {
      /* do nothing */
      if (*op == ')')
      {
         continue;
      }
      /* compute the residual: r = x - Ay */
      else if (*op == '(')
      {
         hypre_ParVectorCopy(x, r0);
         hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, r0);
      }
      /* switch to additive correction */
      else if (*op == '+')
      {
         use_saved_residual = 1;
         continue;
      }
      /* smooth: y += S (x - Ay) */
      else if (*op == '0')
      {
         hypre_ParCSRRelax(A0, x,
                           A0_relax_type,
                           A0_relax_times,
                           A0_l1_norms,
                           A0_relax_weight,
                           A0_omega,
                           A0_max_eig_est,
                           A0_min_eig_est,
                           A0_cheby_order,
                           A0_cheby_fraction,
                           y, g0, z);
      }
      /* subspace correction: y += P B^{-1} P^t r */
      else
      {
         HYPRE_Int i = *op - '1';
         if (i < 0)
         {
            /* invalid character in the cycle string: flag it and skip the
               op (hypre_error_in_arg only records the error; previously
               execution fell through and read A[i] with a negative index) */
            hypre_error_in_arg(16);
            continue;
         }
         /* skip empty subspaces */
         if (!A[i]) { continue; }
         /* compute the residual? */
         if (use_saved_residual)
         {
            use_saved_residual = 0;
            hypre_ParCSRMatrixMatvecT(1.0, P[i], r0, 0.0, r[i]);
         }
         else
         {
            hypre_ParVectorCopy(x, g0);
            hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, g0);
            hypre_ParCSRMatrixMatvecT(1.0, P[i], g0, 0.0, r[i]);
         }
         hypre_ParVectorSetConstantValues(g[i], 0.0);
         (*HB[i]) (B[i], (HYPRE_Matrix)A[i],
                   (HYPRE_Vector)r[i], (HYPRE_Vector)g[i]);
         hypre_ParCSRMatrixMatvec(1.0, P[i], g[i], 0.0, g0);
         hypre_ParVectorAxpy(1.0, g0, y);
      }
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSGetNumIterations
*
* Get the number of AMS iterations.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSGetNumIterations(void *solver,
                                    HYPRE_Int *num_iterations)
{
   /* Report the iteration count recorded by the last AMS solve. */
   *num_iterations = ((hypre_AMSData *) solver) -> num_iterations;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSGetFinalRelativeResidualNorm
*
* Get the final relative residual norm in AMS.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSGetFinalRelativeResidualNorm(void *solver,
                                                HYPRE_Real *rel_resid_norm)
{
   /* Report the relative residual norm recorded by the last AMS solve. */
   *rel_resid_norm = ((hypre_AMSData *) solver) -> rel_resid_norm;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSProjectOutGradients
*
* For problems with zero-conductivity regions, project the vector onto the
* compatible subspace: x = (I - G0 (G0^t G0)^{-1} G0^T) x, where G0 is the
* discrete gradient restricted to the interior nodes of the regions with
* zero conductivity. This ensures that x is orthogonal to the gradients in
* the range of G0.
*
* This function is typically called after the solution iteration is complete,
* in order to facilitate the visualization of the computed field. Without it
* the values in the zero-conductivity regions contain kernel components.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSProjectOutGradients(void *solver,
                                       hypre_ParVector *x)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;

   /* Only active when a zero-conductivity gradient solver B_G0 was set up. */
   if (data -> B_G0)
   {
      /* r1 = G0^t x */
      hypre_ParCSRMatrixMatvecT(1.0, data -> G0, x, 0.0, data -> r1);
      /* g1 ~= (G0^t G0)^{-1} r1, approximated by one AMG solve */
      hypre_ParVectorSetConstantValues(data -> g1, 0.0);
      hypre_BoomerAMGSolve(data -> B_G0, data -> A_G0, data -> r1, data -> g1);
      /* x -= G0 g1: remove the gradient (kernel) component */
      hypre_ParCSRMatrixMatvec(1.0, data -> G0, data -> g1, 0.0, data -> g0);
      hypre_ParVectorAxpy(-1.0, data -> g0, x);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSConstructDiscreteGradient
*
* Construct and return the lowest-order discrete gradient matrix G, based on:
* - a matrix on the egdes (e.g. the stiffness matrix A)
* - a vector on the vertices (e.g. the x coordinates)
* - the array edge_vertex, which lists the global indexes of the
* vertices of the local edges.
*
* We assume that edge_vertex lists the edge vertices consecutively,
* and that the orientation of all edges is consistent. More specificaly:
* If edge_orientation = 1, the edges are already oriented.
* If edge_orientation = 2, the orientation of edge i depends only on the
* sign of edge_vertex[2*i+1] - edge_vertex[2*i].
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSConstructDiscreteGradient(hypre_ParCSRMatrix *A,
                                             hypre_ParVector *x_coord,
                                             hypre_BigInt *edge_vertex,
                                             HYPRE_Int edge_orientation,
                                             hypre_ParCSRMatrix **G_ptr)
{
   hypre_ParCSRMatrix *G;
   HYPRE_Int nedges;

   /* One row of G per local edge of A. */
   nedges = hypre_ParCSRMatrixNumRows(A);

   /* Construct the local part of G based on edge_vertex and the edge
      and vertex partitionings from A and x_coord */
   {
      HYPRE_Int i, *I = hypre_CTAlloc(HYPRE_Int, nedges + 1, HYPRE_MEMORY_HOST);
      HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2 * nedges, HYPRE_MEMORY_HOST);
      hypre_CSRMatrix *local = hypre_CSRMatrixCreate (nedges,
                                                      hypre_ParVectorGlobalSize(x_coord),
                                                      2 * nedges);

      /* Every edge row has exactly two entries (its two vertices). */
      for (i = 0; i <= nedges; i++)
      {
         I[i] = 2 * i;
      }

      if (edge_orientation == 1)
      {
         /* Assume that the edges are already oriented */
         for (i = 0; i < 2 * nedges; i += 2)
         {
            data[i] = -1.0;
            data[i + 1] = 1.0;
         }
      }
      else if (edge_orientation == 2)
      {
         /* Assume that the edge orientation is based on the vertex indexes:
            the edge points from the lower-numbered to the higher-numbered
            vertex. */
         for (i = 0; i < 2 * nedges; i += 2)
         {
            if (edge_vertex[i] < edge_vertex[i + 1])
            {
               data[i] = -1.0;
               data[i + 1] = 1.0;
            }
            else
            {
               data[i] = 1.0;
               data[i + 1] = -1.0;
            }
         }
      }
      else
      {
         /* Any other orientation flag is invalid. */
         hypre_error_in_arg(4);
      }

      /* Hand the arrays to the local CSR matrix.  NOTE(review): BigJ points
         at the caller-supplied edge_vertex array while OwnsData is set to 1 —
         the BigJtoJ conversion below is presumably what resolves ownership
         before the destroy; verify against the hypre_CSRMatrix API. */
      hypre_CSRMatrixI(local) = I;
      hypre_CSRMatrixBigJ(local) = edge_vertex;
      hypre_CSRMatrixData(local) = data;
      hypre_CSRMatrixRownnz(local) = NULL;
      hypre_CSRMatrixOwnsData(local) = 1;
      hypre_CSRMatrixNumRownnz(local) = nedges;

      /* Generate the discrete gradient matrix */
      G = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A),
                                   hypre_ParCSRMatrixGlobalNumRows(A),
                                   hypre_ParVectorGlobalSize(x_coord),
                                   hypre_ParCSRMatrixRowStarts(A),
                                   hypre_ParVectorPartitioning(x_coord),
                                   0, 0, 0);
      /* Convert big (global) column indices to local ones, then split the
         local matrix into the diag/offd parts of G. */
      hypre_CSRMatrixBigJtoJ(local);
      GenerateDiagAndOffd(local, G,
                          hypre_ParVectorFirstIndex(x_coord),
                          hypre_ParVectorLastIndex(x_coord));

      /* Account for empty rows in G. These may appear when A includes only
         the interior (non-Dirichlet b.c.) edges. */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         G_diag->num_cols = hypre_VectorSize(hypre_ParVectorLocalVector(x_coord));
      }

      /* Free the local matrix */
      hypre_CSRMatrixDestroy(local);
   }

   *G_ptr = G;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSFEISetup
*
* Construct an AMS solver object based on the following data:
*
* A - the edge element stiffness matrix
* num_vert - number of vertices (nodes) in the processor
* num_local_vert - number of vertices owned by the processor
* vert_number - global indexes of the vertices in the processor
* vert_coord - coordinates of the vertices in the processor
* num_edges - number of edges owned by the processor
* edge_vertex - the vertices of the edges owned by the processor.
* Vertices are in local numbering (the same as in
* vert_number), and edge orientation is always from
* the first to the second vertex.
*
* Here we distinguish between vertices that belong to elements in the
* current processor, and the subset of these vertices that is owned by
* the processor.
*
* This function is written specifically for input from the FEI and should
* be called before hypre_AMSSetup().
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSFEISetup(void *solver,
                            hypre_ParCSRMatrix *A,
                            hypre_ParVector *b,
                            hypre_ParVector *x,
                            HYPRE_Int num_vert,
                            HYPRE_Int num_local_vert,
                            HYPRE_BigInt *vert_number,
                            HYPRE_Real *vert_coord,
                            HYPRE_Int num_edges,
                            HYPRE_BigInt *edge_vertex)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   HYPRE_Int i, j;

   hypre_ParCSRMatrix *G;
   hypre_ParVector *x_coord, *y_coord, *z_coord;
   HYPRE_Real *x_data, *y_data, *z_data;

   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_BigInt vert_part[2], num_global_vert;
   HYPRE_BigInt vert_start, vert_end;
   HYPRE_BigInt big_local_vert = (HYPRE_BigInt) num_local_vert;

   /* Find the processor partitioning of the vertices: a prefix sum gives
      this rank's [vert_part[0], vert_part[1]) ownership range, and an
      all-reduce gives the global vertex count. */
   hypre_MPI_Scan(&big_local_vert, &vert_part[1], 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
   vert_part[0] = vert_part[1] - big_local_vert;
   hypre_MPI_Allreduce(&big_local_vert, &num_global_vert, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);

   /* Construct hypre parallel vectors for the vertex coordinates.
      NOTE(review): vert_part is a stack array passed to three creates —
      assumes hypre_ParVectorCreate copies the partitioning; verify. */
   x_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(x_coord);
   hypre_ParVectorOwnsData(x_coord) = 1;
   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x_coord));

   y_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(y_coord);
   hypre_ParVectorOwnsData(y_coord) = 1;
   y_data = hypre_VectorData(hypre_ParVectorLocalVector(y_coord));

   z_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(z_coord);
   hypre_ParVectorOwnsData(z_coord) = 1;
   z_data = hypre_VectorData(hypre_ParVectorLocalVector(z_coord));

   vert_start = hypre_ParVectorFirstIndex(x_coord);
   vert_end = hypre_ParVectorLastIndex(x_coord);

   /* Save coordinates of locally owned vertices (vert_coord is packed as
      x,y,z triples; vertices known to this rank but owned elsewhere are
      skipped). */
   for (i = 0; i < num_vert; i++)
   {
      if (vert_number[i] >= vert_start && vert_number[i] <= vert_end)
      {
         j = (HYPRE_Int)(vert_number[i] - vert_start);
         x_data[j] = vert_coord[3 * i];
         y_data[j] = vert_coord[3 * i + 1];
         z_data[j] = vert_coord[3 * i + 2];
      }
   }

   /* Change vertex numbers from local to global.
      NOTE: this mutates the caller's edge_vertex array in place. */
   for (i = 0; i < 2 * num_edges; i++)
   {
      edge_vertex[i] = vert_number[edge_vertex[i]];
   }

   /* Construct the local part of G based on edge_vertex */
   {
      /* HYPRE_Int num_edges = hypre_ParCSRMatrixNumRows(A); */
      HYPRE_Int *I = hypre_CTAlloc(HYPRE_Int, num_edges + 1, HYPRE_MEMORY_HOST);
      HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2 * num_edges, HYPRE_MEMORY_HOST);
      hypre_CSRMatrix *local = hypre_CSRMatrixCreate (num_edges,
                                                      num_global_vert,
                                                      2 * num_edges);

      /* Two entries (the edge's two vertices) per row. */
      for (i = 0; i <= num_edges; i++)
      {
         I[i] = 2 * i;
      }

      /* Assume that the edge orientation is based on the vertex indexes */
      for (i = 0; i < 2 * num_edges; i += 2)
      {
         data[i] = 1.0;
         data[i + 1] = -1.0;
      }

      hypre_CSRMatrixI(local) = I;
      hypre_CSRMatrixBigJ(local) = edge_vertex;
      hypre_CSRMatrixData(local) = data;
      hypre_CSRMatrixRownnz(local) = NULL;
      hypre_CSRMatrixOwnsData(local) = 1;
      hypre_CSRMatrixNumRownnz(local) = num_edges;

      G = hypre_ParCSRMatrixCreate(comm,
                                   hypre_ParCSRMatrixGlobalNumRows(A),
                                   num_global_vert,
                                   hypre_ParCSRMatrixRowStarts(A),
                                   vert_part,
                                   0, 0, 0);
      /* Convert global column indices to local, split into diag/offd. */
      hypre_CSRMatrixBigJtoJ(local);
      GenerateDiagAndOffd(local, G, vert_start, vert_end);
      //hypre_CSRMatrixJ(local) = NULL;
      hypre_CSRMatrixDestroy(local);
   }

   /* Hand the discrete gradient and coordinate vectors to the AMS solver;
      hypre_AMSFEIDestroy() is responsible for freeing them. */
   ams_data -> G = G;

   ams_data -> x = x_coord;
   ams_data -> y = y_coord;
   ams_data -> z = z_coord;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSFEIDestroy
*
* Free the additional memory allocated in hypre_AMSFEISetup().
*
* This function is written specifically for input from the FEI and should
* be called before hypre_AMSDestroy().
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSFEIDestroy(void *solver)
{
   /* Release the discrete gradient and coordinate vectors that were
      allocated by hypre_AMSFEISetup(). */
   hypre_AMSData *data = (hypre_AMSData *) solver;

   if (data -> G) { hypre_ParCSRMatrixDestroy(data -> G); }
   if (data -> x) { hypre_ParVectorDestroy(data -> x); }
   if (data -> y) { hypre_ParVectorDestroy(data -> y); }
   if (data -> z) { hypre_ParVectorDestroy(data -> z); }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRComputeL1Norms Threads
*
* Compute the l1 norms of the rows of a given matrix, depending on
* the option parameter:
*
* option 1 = Compute the l1 norm of the rows
* option 2 = Compute the l1 norm of the (processor) off-diagonal
* part of the rows plus the diagonal of A
* option 3 = Compute the l2 norm^2 of the rows
* option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
* Smoothers for Ultra-Parallel Computing"
*
* The above computations are done in a CF manner, whenever the provided
* cf_marker is not NULL.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRComputeL1NormsThreads(hypre_ParCSRMatrix *A,
                                            HYPRE_Int option,
                                            HYPRE_Int num_threads,
                                            HYPRE_Int *cf_marker,
                                            HYPRE_Real **l1_norm_ptr)
{
   HYPRE_Int i, j, k;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   HYPRE_Real diag;
   HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows, hypre_ParCSRMatrixMemoryLocation(A));
   HYPRE_Int ii, ns, ne, rest, size;

   HYPRE_Int *cf_marker_offd = NULL;
   HYPRE_Int cf_diag;

   /* Collect the cf_marker data for the off-processor columns, so the
      CF-restricted sums below can also test the marker of offd entries. */
   if (cf_marker != NULL)
   {
      HYPRE_Int index;
      HYPRE_Int num_sends;
      HYPRE_Int start;
      HYPRE_Int *int_buf_data = NULL;

      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;

      if (num_cols_offd)
      {
         cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      }
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
         int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      /* Pack the markers of the rows other processors need. */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
                                                 cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }

   /* Each OpenMP thread k processes its own contiguous row range [ns, ne);
      the first 'rest' threads get one extra row. */
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,ii,j,k,ns,ne,rest,size,diag,cf_diag) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < num_threads; k++)
   {
      size = num_rows / num_threads;
      rest = num_rows - size * num_threads;
      if (k < rest)
      {
         ns = k * size + k;
         ne = (k + 1) * size + k + 1;
      }
      else
      {
         ns = k * size + rest;
         ne = (k + 1) * size + rest;
      }

      if (option == 1)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)
               {
                  l1_norm[i] += fabs(A_diag_data[j]);
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)
                  {
                     l1_norm[i] += fabs(A_offd_data[j]);
                  }
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the CF l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)
                  if (cf_diag == cf_marker[A_diag_J[j]])
                  {
                     l1_norm[i] += fabs(A_diag_data[j]);
                  }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     {
                        l1_norm[i] += fabs(A_offd_data[j]);
                     }
               }
            }
         }
      }
      else if (option == 2)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                  {
                     l1_norm[i] += fabs(A_diag_data[j]);
                  }
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)
                  {
                     l1_norm[i] += fabs(A_offd_data[j]);
                  }
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) &&
                      (cf_diag == cf_marker[A_diag_J[j]]))
                  {
                     l1_norm[i] += fabs(A_diag_data[j]);
                  }
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     {
                        l1_norm[i] += fabs(A_offd_data[j]);
                     }
               }
            }
         }
      }
      else if (option == 3)
      {
         /* Squared l2 norm of the ith row. */
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)
            {
               l1_norm[i] += A_diag_data[j] * A_diag_data[j];
            }
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)
               {
                  l1_norm[i] += A_offd_data[j] * A_offd_data[j];
               }
         }
      }
      else if (option == 4)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            /* FIX: initialize per row — previously, a row with no stored
               diagonal entry used an indeterminate (or stale previous-row)
               'diag' in the truncation test below. With 0.0 the truncation
               is simply skipped for such rows. */
            diag = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                     {
                        /* off-thread entries contribute with weight 1/2
                           (Remark 6.2 truncated l1 smoother) */
                        l1_norm[i] += 0.5 * fabs(A_diag_data[j]);
                     }
                  }
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)
                  {
                     l1_norm[i] += 0.5 * fabs(A_offd_data[j]);
                  }
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i + 1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) &&
                      (cf_diag == cf_marker[A_diag_J[j]]))
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                     {
                        l1_norm[i] += 0.5 * fabs(A_diag_data[j]);
                     }
                  }
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i + 1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     {
                        l1_norm[i] += 0.5 * fabs(A_offd_data[j]);
                     }
               }
            }

            /* Truncate according to Remark 6.2 */
            if (l1_norm[i] <= 4.0 / 3.0 * diag)
            {
               l1_norm[i] = diag;
            }
         }
      }
      else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */
      {
         /* Set the diag element (assumes the diagonal is the first entry of
            each diag row, as hypre stores it); guard against zero. */
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = A_diag_data[A_diag_I[i]];
            if (l1_norm[i] == 0) { l1_norm[i] = 1.0; }
         }
      }

      if (option < 5)
      {
         /* Handle negative definite matrices: give l1_norm the sign of the
            diagonal so the smoother scales in the right direction. */
         for (i = ns; i < ne; i++)
            if (A_diag_data[A_diag_I[i]] < 0)
            {
               l1_norm[i] = -l1_norm[i];
            }

         /* A zero row norm would make the smoother divide by zero. */
         for (i = ns; i < ne; i++)
            /* if (fabs(l1_norm[i]) < DBL_EPSILON) */
            if (fabs(l1_norm[i]) == 0.0)
            {
               hypre_error_in_arg(1);
               break;
            }
      }
   }

   hypre_TFree(cf_marker_offd, HYPRE_MEMORY_HOST);

   *l1_norm_ptr = l1_norm;

   return hypre_error_flag;
}
|
morn_image_transform.c | /*
Copyright (C) 2019-2020 JingWeiZhangHuai <jingweizhanghuai@163.com>
Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
#include "morn_image.h"
#define SRC(CN,X,Y) ((src)->data[CN][Y][X])
/* Precompute the backward-mapping grid for an image warp: for every
   destination pixel (i,j), evaluate the user-supplied coordinate functions
   to get the source location (lx,ly), and store its integer part in
   gridx/gridy plus the two 0..15 fixed-point interpolation weights packed
   into one byte of w (high nibble = x weight, low nibble = y weight).
   Out-of-range pixels are marked with DFLT in gridx/gridy. */
void TransformGrid(MImage *src,float (*x_func)(int,int,void *),float (*y_func)(int,int,void *),void *para,MTable *gridx,MTable *gridy,MTable *w)
{
    int i, j;
    /* The destination size is taken from the weight table w. */
    int dst_height = w->row;
    int dst_width = w->col;
    // printf("dst_height=%d,dst_width=%d\n",dst_height,dst_width);

    for(j=0;j<dst_height;j++)for(i=0;i<dst_width;i++)
    {
        /* Source coordinates of destination pixel (i,j). */
        float ly = y_func(i,j,para);
        float lx = x_func(i,j,para);
        /* Nudge strictly positive coordinates down by an epsilon so that
           exact integer results do not round up at the truncation below. */
        if(lx > 0.00001f) lx -= 0.00001f;
        if(ly > 0.00001f) ly -= 0.00001f;
        short x_locate = (short)lx;
        short y_locate = (short)ly;
        /* Reject pixels whose 2x2 interpolation neighborhood leaves the
           valid source region.  NOTE(review): the X2 test uses the float lx
           while the Y2 test uses the truncated y_locate — equivalent for
           nonnegative lx, but asymmetric; confirm intent. */
        if((y_locate<ImageY1(src))||(y_locate>=ImageY2(src)-1)||(x_locate<ImageX1(src,y_locate))||(lx>=ImageX2(src,y_locate)-1))
        {
            /* DFLT marks "no source pixel"; GridInterpolation outputs 0. */
            gridx->dataS16[j][i] = DFLT;
            gridy->dataS16[j][i] = DFLT;
            continue;
        }
        gridx->dataS16[j][i] = x_locate;
        gridy->dataS16[j][i] = y_locate;
        /* Fractional parts quantized to 0..15; stored as 15-frac so larger
           values weight the lower-index neighbor. */
        x_locate = 15-(short)((lx-(float)x_locate)*15.0f+0.5f);
        y_locate = 15-(short)((ly-(float)y_locate)*15.0f+0.5f);
        w->dataU8[j][i] = (x_locate<<4)+y_locate;
    }
}
/* Resample src into dst using the grid computed by TransformGrid.
   gridx/gridy hold the integer source coordinates (negative = DFLT = write
   zeros), w holds the packed 4-bit interpolation weights.  A nonzero result
   of the mode test selects nearest-neighbor (CACL1); otherwise bilinear
   (CACL0).  One unrolled loop per (channel count, mode) combination. */
void GridInterpolation(MImage *src,MImage *dst,MTable *gridx,MTable *gridy,MTable *w,int mode)
{
    int i, j;
    int height = dst->height;
    int width = dst->width;

    /* Per-channel row-pointer tables for up to 4 channels. */
    unsigned char **s0=src->data[0];unsigned char **s1=src->data[1];unsigned char **s2=src->data[2];unsigned char **s3=src->data[3];
    unsigned char **d0=dst->data[0];unsigned char **d1=dst->data[1];unsigned char **d2=dst->data[2];unsigned char **d3=dst->data[3];

    /* Collapse mode to 1 when it contains no bits outside MORN_NEAREST
       (i.e. nearest-neighbor requested), else 0 (bilinear). */
    mode = ((mode | MORN_NEAREST) == MORN_NEAREST);

    /* Bilinear blend of the 2x2 neighborhood; weights are 0..15 per axis,
       so the normalization is 15*15=225 with +112 for rounding. */
    #define INTERPOLATION_CACL0(C) do{\
        d##C[j][i]=((s##C[y][x]*wx+s##C[y][x+1]*(15-wx))*wy+(s##C[y+1][x]*wx+s##C[y+1][x+1]*(15-wx))*(15-wy)+112)/225;\
    }while(0)

    /* Nearest neighbor: round each axis by its 4-bit weight. */
    #define INTERPOLATION_CACL1(C) do{\
        d##C[j][i]=s##C[(wy<8)?y+1:y][(wx<8)?x+1:x];\
    }while(0)

    // printf("src->channel=%d,height=%d,width=%d\n",src->channel,height,width);
    if((src->channel==1)&&(!mode))
    {
        // #pragma omp parallel for
        for(j=0;j<height;j++)for(i=0;i<width;i++)
        {
            int x = gridx->dataS16[j][i]; if(x<0) {d0[j][i] = 0; continue;} \
            int y = gridy->dataS16[j][i];/*if(y<0) {d0[j][i] = 0; continue;}*/\
            int wx=(w->dataU8[j][i]>> 4);int wy=(w->dataU8[j][i]&0x0F);\
            INTERPOLATION_CACL0(0);
        }
        return;
    }
    if((src->channel==1)&&(mode))
    {
        // #pragma omp parallel for
        for(j=0;j<height;j++)for(i=0;i<width;i++)
        {
            int x = gridx->dataS16[j][i]; if(x<0) {d0[j][i] = 0; continue;} \
            int y = gridy->dataS16[j][i];/*if(y<0) {d0[j][i] = 0; continue;}*/\
            int wx=(w->dataU8[j][i]>> 4);int wy=(w->dataU8[j][i]&0x0F);\
            INTERPOLATION_CACL1(0);
        }
        return;
    }
    if((src->channel==2)&&(!mode))
    {
        // #pragma omp parallel for
        for(j=0;j<height;j++)for(i=0;i<width;i++)
        {
            int x = gridx->dataS16[j][i]; if(x<0) {d0[j][i] = 0;d1[j][i] = 0; continue;} \
            int y = gridy->dataS16[j][i];/*if(y<0) {d0[j][i] = 0;d1[j][i] = 0; continue;}*/\
            int wx=(w->dataU8[j][i]>> 4);int wy=(w->dataU8[j][i]&0x0F);\
            INTERPOLATION_CACL0(0);INTERPOLATION_CACL0(1);
        }
        return;
    }
    if((src->channel==2)&&(mode))
    {
        // #pragma omp parallel for
        for(j=0;j<height;j++)for(i=0;i<width;i++)
        {
            int x = gridx->dataS16[j][i]; if(x<0) {d0[j][i] = 0;d1[j][i] = 0; continue;} \
            int y = gridy->dataS16[j][i];/*if(y<0) {d0[j][i] = 0;d1[j][i] = 0; continue;}*/\
            int wx=(w->dataU8[j][i]>> 4);int wy=(w->dataU8[j][i]&0x0F);\
            INTERPOLATION_CACL1(0);INTERPOLATION_CACL1(1);
        }
        return;
    }
    if((src->channel==3)&&(!mode))
    {
        // #pragma omp parallel for
        for(j=0;j<height;j++)for(i=0;i<width;i++)
        {
            int x = gridx->dataS16[j][i]; if(x<0) {d0[j][i] = 0;d1[j][i] = 0;d2[j][i] = 0; continue;}
            int y = gridy->dataS16[j][i];/*if(y<0) {d0[j][i] = 0;d1[j][i] = 0;d2[j][i] = 0; continue;}*/
            int wx=(w->dataU8[j][i]>> 4);int wy=(w->dataU8[j][i]&0x0F);
            INTERPOLATION_CACL0(0);INTERPOLATION_CACL0(1);INTERPOLATION_CACL0(2);
        }
        return;
    }
    if((src->channel==3)&&(mode))
    {
        // #pragma omp parallel for
        for(j=0;j<height;j++)for(i=0;i<width;i++)
        {
            int x = gridx->dataS16[j][i]; if(x<0) {d0[j][i] = 0;d1[j][i] = 0;d2[j][i] = 0; continue;}
            int y = gridy->dataS16[j][i];/*if(y<0) {d0[j][i] = 0;d1[j][i] = 0;d2[j][i] = 0; continue;}*/
            int wx=(w->dataU8[j][i]>> 4);int wy=(w->dataU8[j][i]&0x0F);
            INTERPOLATION_CACL1(0);INTERPOLATION_CACL1(1);INTERPOLATION_CACL1(2);
        }
        return;
    }
    if((src->channel==4)&&(!mode))
    {
        // #pragma omp parallel for
        for(j=0;j<height;j++)for(i=0;i<width;i++)
        {
            int x = gridx->dataS16[j][i]; if(x<0) {d0[j][i] = 0;d1[j][i] = 0;d2[j][i] = 0;d3[j][i] = 0; continue;}
            int y = gridy->dataS16[j][i];/*if(y<0) {d0[j][i] = 0;d1[j][i] = 0;d2[j][i] = 0;d3[j][i] = 0; continue;}*/
            int wx=(w->dataU8[j][i]>> 4);int wy=(w->dataU8[j][i]&0x0F);
            INTERPOLATION_CACL0(0);INTERPOLATION_CACL0(1);INTERPOLATION_CACL0(2);INTERPOLATION_CACL0(3);
        }
        return;
    }
    if((src->channel==4)&&(mode))
    {
        // #pragma omp parallel for
        for(j=0;j<height;j++)for(i=0;i<width;i++)
        {
            int x = gridx->dataS16[j][i]; if(x<0) {d0[j][i] = 0;d1[j][i] = 0;d2[j][i] = 0;d3[j][i] = 0; continue;}
            int y = gridy->dataS16[j][i];/*if(y<0) {d0[j][i] = 0;d1[j][i] = 0;d2[j][i] = 0;d3[j][i] = 0; continue;}*/
            int wx=(w->dataU8[j][i]>> 4);int wy=(w->dataU8[j][i]&0x0F);
            INTERPOLATION_CACL1(0);INTERPOLATION_CACL1(1);INTERPOLATION_CACL1(2);INTERPOLATION_CACL1(3);
        }
        return;
    }
}
/* Per-image cache for mImageCoordinateTransform: the warp grid is
   recomputed only when the output size or the coordinate functions change. */
struct HandleImageCoordinateTransform
{
    int height;                     /* cached destination height */
    int width;                      /* cached destination width */
    float (*x_func)(int,int,void *); /* coordinate functions the grid was built with */
    float (*y_func)(int,int,void *);
    MTable *lx;                     /* integer source x per destination pixel */
    MTable *ly;                     /* integer source y per destination pixel */
    MTable *w;                      /* packed 4+4 bit interpolation weights */
};
/* Handle finalizer: release the cached warp tables.
   Fix: the second check previously tested info->lx before releasing
   info->ly — leaking ly when lx was NULL, or releasing an
   uninitialized/NULL ly when only lx had been created. */
void endImageCoordinateTransform(void *handle)
{
    struct HandleImageCoordinateTransform *info = (struct HandleImageCoordinateTransform *)handle;
    if(info->lx != NULL) mTableRelease(info->lx);
    if(info->ly != NULL) mTableRelease(info->ly);
    if(info->w  != NULL) mTableRelease(info->w );
}
#define HASH_ImageCoordinateTransform 0x5f44c7bc
/* Warp src through the user-supplied backward coordinate mapping
   (x_func,y_func): destination pixel (i,j) samples the source at
   (x_func(i,j,para), y_func(i,j,para)).  The grid is cached on src's
   handle and reused while the size and functions are unchanged.
   NOTE(review): when dst is NULL/invalid or equal to src, the result is
   exchanged back into src (in-place semantics) — verify against callers. */
void mImageCoordinateTransform(MImage *src,MImage *dst,float (*x_func)(int,int,void *),float (*y_func)(int,int,void *),void *para,int mode)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid input");
    MImage *p=dst;
    /* In-place request: render into a scratch image, swap back at the end. */
    if(INVALID_POINTER(dst)||(dst==src))
        dst = mImageCreate(src->channel,src->height,src->width,NULL);

    /* Non-positive dst dimensions default to the source dimensions. */
    int width; if(dst->width <=0) width = src->width; else width = dst->width;
    int height; if(dst->height<=0) height= src->height; else height= dst->height;
    mImageRedefine(dst,src->channel,height,width,dst->data);
    // memcpy(&(dst->info),&(src->info),sizeof(MInfo));

    MHandle *hdl=mHandle(src,ImageCoordinateTransform);
    struct HandleImageCoordinateTransform *handle = (struct HandleImageCoordinateTransform *)(hdl->handle);
    /* Rebuild the warp grid only when the cache is stale. */
    if((hdl->valid == 0)||(handle->height!=height)||(handle->width!=width)||(handle->x_func!=x_func)||(handle->y_func!=y_func))
    {
        handle->height = height;
        handle->width = width;
        handle->x_func = x_func;
        handle->y_func = y_func;
        /* Create or resize the three grid tables to the destination size. */
        if(handle->lx==NULL) handle->lx=mTableCreate(height,width,sizeof(short),NULL);
        else mTableRedefine(handle->lx,height,width,sizeof(short),NULL);
        if(handle->ly==NULL) handle->ly=mTableCreate(height,width,sizeof(short),NULL);
        else mTableRedefine(handle->ly,height,width,sizeof(short),NULL);
        if(handle->w ==NULL) handle->w =mTableCreate(height,width,sizeof(unsigned char),NULL);
        else mTableRedefine(handle->w ,height,width,sizeof(unsigned char),NULL);

        TransformGrid(src,x_func,y_func,para,handle->lx,handle->ly,handle->w);
    }
    GridInterpolation(src,dst,handle->lx,handle->ly,handle->w,mode);

    /* In-place case: move the result into src and drop the scratch image. */
    if(p!=dst)
    {
        mImageExchange(src,dst);
        mImageRelease(dst);
    }
    hdl->valid = 1;
}
/* Compute the 8 coefficients of the perspective transform that maps the 4
   destination anchor points pd onto the 4 source anchor points ps, by
   solving the standard 8x8 homography system (two equations per point
   pair).  The result in para feeds Perspective_x/Perspective_y. */
void PerspectivePara(MImagePoint *ps,MImagePoint *pd,float *para)
{
    MMatrix *mat = mMatrixCreate(8,9,NULL,DFLT);
    for(int n=0;n<4;n++)
    {
        int j = n+n;  /* rows j and j+1 hold the x- and y-equation of point n */
        mat->data[j][0] = pd[n].x;              mat->data[j+1][0] = 0.0f;
        mat->data[j][1] = pd[n].y;              mat->data[j+1][1] = 0.0f;
        mat->data[j][2] = 1.0f;                 mat->data[j+1][2] = 0.0f;
        mat->data[j][3] = 0.0f;                 mat->data[j+1][3] = pd[n].x;
        mat->data[j][4] = 0.0f;                 mat->data[j+1][4] = pd[n].y;
        mat->data[j][5] = 0.0f;                 mat->data[j+1][5] = 1.0f;
        mat->data[j][6] = 0.0f-pd[n].x*ps[n].x; mat->data[j+1][6] = 0.0f-pd[n].x*ps[n].y;
        mat->data[j][7] = 0.0f-pd[n].y*ps[n].x; mat->data[j+1][7] = 0.0f-pd[n].y*ps[n].y;
        mat->data[j][8] = 0.0f-ps[n].x;         mat->data[j+1][8] = 0.0f-ps[n].y;
    }
    mLinearEquation(mat,para);
    mMatrixRelease(mat);
}
/* Source x coordinate of destination pixel (u,v) under the 8-parameter
   perspective transform stored in para (see PerspectivePara). */
float Perspective_x(int u,int v,void *para)
{
    const float *c = (const float *)para;
    float num = c[0]*u + c[1]*v + c[2];
    float den = c[6]*u + c[7]*v + 1.0f;
    return num/den;
}
/* Source y coordinate of destination pixel (u,v) under the 8-parameter
   perspective transform stored in para (see PerspectivePara). */
float Perspective_y(int u,int v,void *para)
{
    const float *c = (const float *)para;
    float num = c[3]*u + c[4]*v + c[5];
    float den = c[6]*u + c[7]*v + 1.0f;
    return num/den;
}
/* Per-image cache for mImagePerspectiveCorrection: the warp grid is rebuilt
   only when the output size or either anchor quadrilateral changes. */
struct HandleImagePerspectiveCorrection
{
    int height;          /* cached destination height */
    int width;           /* cached destination width */
    MImagePoint ps[4];   /* source anchor quadrilateral the grid was built with */
    MImagePoint pd[4];   /* destination anchor quadrilateral */
    MTable *lx;          /* integer source x per destination pixel */
    MTable *ly;          /* integer source y per destination pixel */
    MTable *w;           /* packed 4+4 bit interpolation weights */
};
/* Handle finalizer: release the cached warp tables.
   Fix: the second check previously tested info->lx before releasing
   info->ly — leaking ly when lx was NULL, or releasing an
   uninitialized/NULL ly when only lx had been created. */
void endImagePerspectiveCorrection(void *handle)
{
    struct HandleImagePerspectiveCorrection *info = (struct HandleImagePerspectiveCorrection *)handle;
    if(info->lx != NULL) mTableRelease(info->lx);
    if(info->ly != NULL) mTableRelease(info->ly);
    if(info->w  != NULL) mTableRelease(info->w );
}
#define HASH_ImagePerspectiveCorrection 0xdd819d48
/* Perspective-warp src so that the source quadrilateral ps maps onto the
   destination quadrilateral pd.  NULL ps/pd default to the full source /
   destination rectangle.  The warp grid is cached on src's handle and
   reused while size and anchors are unchanged.  NOTE(review): when dst is
   NULL/invalid or equal to src, the result is exchanged back into src
   (in-place semantics) — verify against callers. */
void mImagePerspectiveCorrection(MImage *src,MImage *dst,MImagePoint *ps,MImagePoint *pd,int mode)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid source image");
    MImage *p = dst;
    /* In-place request: render into a scratch image, swap back at the end. */
    if(INVALID_POINTER(dst)||(dst==src))
        dst = mImageCreate(src->channel,src->height,src->width,NULL);
    else if((dst->height<=0)||(dst->width<=0))
        mImageRedefine(dst,src->channel,src->height,src->width,dst->data);
    else
        mImageRedefine(dst,src->channel,DFLT,DFLT,dst->data);
    // memcpy(&(dst->info),&(src->info),sizeof(MInfo));

    int height = dst->height;
    int width = dst->width;

    /* Default anchors: the four corners of src and dst respectively. */
    MImagePoint p_s[4],p_d[4];
    if(ps==NULL)
    {
        ps=p_s;
        mPoint(ps+0,         0,          0);
        mPoint(ps+1,src->width,          0);
        mPoint(ps+2,src->width,src->height);
        mPoint(ps+3,         0,src->height);
    }
    if(pd==NULL)
    {
        pd=p_d;
        mPoint(pd+0,     0,     0);
        mPoint(pd+1,width,      0);
        mPoint(pd+2,width, height);
        mPoint(pd+3,     0, height);
    }

    MHandle *hdl=mHandle(src,ImagePerspectiveCorrection);
    struct HandleImagePerspectiveCorrection *handle = (struct HandleImagePerspectiveCorrection *)(hdl->handle);
    /* Rebuild the warp grid only when the cache is stale. */
    if((hdl->valid == 0)
     ||(memcmp(ps,handle->ps,4*sizeof(MImagePoint))!=0)
     ||(memcmp(pd,handle->pd,4*sizeof(MImagePoint))!=0)
     ||(handle->height != height)
     ||(handle->width  != width))
    {
        /* Solve for the 8 perspective coefficients (dst -> src mapping). */
        float para[8];
        PerspectivePara(ps,pd,para);
        // MMatrix *mat = mMatrixCreate(8,9,NULL);
        // for(int n=0,j=0;n<4;n=n+1,j=j+2)
        // {
        //     mat->data[j][0] = pd[n].x;              mat->data[j+1][0] = 0.0f;
        //     mat->data[j][1] = pd[n].y;              mat->data[j+1][1] = 0.0f;
        //     mat->data[j][2] = 1.0f;                 mat->data[j+1][2] = 0.0f;
        //     mat->data[j][3] = 0.0f;                 mat->data[j+1][3] = pd[n].x;
        //     mat->data[j][4] = 0.0f;                 mat->data[j+1][4] = pd[n].y;
        //     mat->data[j][5] = 0.0f;                 mat->data[j+1][5] = 1.0f;
        //     mat->data[j][6] = 0.0f-pd[n].x*ps[n].x; mat->data[j+1][6] = 0.0f-pd[n].x*ps[n].y;
        //     mat->data[j][7] = 0.0f-pd[n].y*ps[n].x; mat->data[j+1][7] = 0.0f-pd[n].y*ps[n].y;
        //     mat->data[j][8] = 0.0f-ps[n].x;         mat->data[j+1][8] = 0.0f-ps[n].y;
        // }
        // mLinearEquation(mat,para);
        // mMatrixRelease(mat);

        /* Create or resize the three grid tables to the destination size. */
        if(handle->lx==NULL) handle->lx=mTableCreate(height,width,sizeof(short),NULL);
        else mTableRedefine(handle->lx,height,width,sizeof(short),NULL);
        if(handle->ly==NULL) handle->ly=mTableCreate(height,width,sizeof(short),NULL);
        else mTableRedefine(handle->ly,height,width,sizeof(short),NULL);
        if(handle->w ==NULL) handle->w =mTableCreate(height,width,sizeof(unsigned char),NULL);
        else mTableRedefine(handle->w ,height,width,sizeof(unsigned char),NULL);

        TransformGrid(src,Perspective_x,Perspective_y,para,handle->lx,handle->ly,handle->w);

        /* Remember what the grid was built for. */
        memcpy(handle->ps,ps,4*sizeof(MImagePoint));
        memcpy(handle->pd,pd,4*sizeof(MImagePoint));
        handle->height = height;
        handle->width = width;
    }
    GridInterpolation(src,dst,handle->lx,handle->ly,handle->w,mode);

    /* In-place case: move the result into src and drop the scratch image. */
    if(p!=dst)
    {
        mImageExchange(src,dst);
        mImageRelease(dst);
    }
    hdl->valid = 1;
}
/* Source x coordinate of destination pixel (u,v) under the 6-parameter
   affine transform stored in para. */
float Affine_x(int u,int v,void *para)
{
    const float *c = (const float *)para;
    return c[0]*u + c[1]*v + c[2];
}
/* Source y coordinate of destination pixel (u,v) under the 6-parameter
   affine transform stored in para. */
float Affine_y(int u,int v,void *para)
{
    const float *c = (const float *)para;
    return c[3]*u + c[4]*v + c[5];
}
/* Per-image cache for mImageAffineCorrection: the warp grid is rebuilt only
   when the output size or either anchor triangle changes. */
struct HandleImageAffineCorrection
{
    int height;          /* cached destination height */
    int width;           /* cached destination width */
    MImagePoint ps[3];   /* source anchor triangle the grid was built with */
    MImagePoint pd[3];   /* destination anchor triangle */
    MTable *lx;          /* integer source x per destination pixel */
    MTable *ly;          /* integer source y per destination pixel */
    MTable *w;           /* packed 4+4 bit interpolation weights */
};
/* Handle finalizer: release the cached warp tables.
   Fix: the second check previously tested info->lx before releasing
   info->ly — leaking ly when lx was NULL, or releasing an
   uninitialized/NULL ly when only lx had been created. */
void endImageAffineCorrection(void *handle)
{
    struct HandleImageAffineCorrection *info = (struct HandleImageAffineCorrection *)handle;
    if(info->lx != NULL) mTableRelease(info->lx);
    if(info->ly != NULL) mTableRelease(info->ly);
    if(info->w  != NULL) mTableRelease(info->w );
}
#define HASH_ImageAffineCorrection 0x1670806b
/* Affine-warp src so that the source triangle ps maps onto the destination
   triangle pd.  The 6 coefficients of the dst->src mapping are solved in
   closed form from the three point pairs.  The warp grid is cached on src's
   handle.  NOTE(review): unlike the perspective variant, ps and pd are
   dereferenced without a NULL default — confirm callers always pass them.
   When dst is NULL/invalid or equal to src, the result is exchanged back
   into src (in-place semantics). */
void mImageAffineCorrection(MImage *src,MImage *dst,MImagePoint *ps,MImagePoint *pd,int mode)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid source image");
    MImage *p = dst;
    /* In-place request: render into a scratch image, swap back at the end. */
    if(INVALID_POINTER(dst)||(dst==src))
        dst = mImageCreate(src->channel,src->height,src->width,NULL);
    else if((dst->height<=0)||(dst->width<=0))
        mImageRedefine(dst,src->channel,src->height,src->width,dst->data);
    else
        mImageRedefine(dst,src->channel,DFLT,DFLT,dst->data);
    // memcpy(&(dst->info),&(src->info),sizeof(MInfo));

    int height = dst->height;
    int width = dst->width;

    MHandle *hdl=mHandle(src,ImageAffineCorrection);
    struct HandleImageAffineCorrection *handle = (struct HandleImageAffineCorrection *)(hdl->handle);
    /* Rebuild the warp grid only when the cache is stale. */
    if((hdl->valid == 0)
     ||(memcmp(ps,handle->ps,3*sizeof(MImagePoint))!=0)
     ||(memcmp(pd,handle->pd,3*sizeof(MImagePoint))!=0)
     ||(handle->height != height)
     ||(handle->width  != width))
    {
        /* Solve the 2x2 system given by the two destination edge vectors;
           c is its determinant — zero means the anchors are collinear. */
        float para[6];
        float a1 = pd[1].x - pd[0].x;
        float a2 = pd[2].x - pd[0].x;
        float b1 = pd[1].y - pd[0].y;
        float b2 = pd[2].y - pd[0].y;
        float c = (a1*b2 -a2*b1);
        mException((c==0.0f),EXIT,"invalid achor point");
        float c1,c2;
        /* x row of the transform. */
        c1 = ps[1].x-ps[0].x;
        c2 = ps[2].x-ps[0].x;
        para[0] = (c1*b2 - c2*b1)/c;
        para[1] = (c2*a1 - c1*a2)/c;
        para[2] = ps[0].x - (para[0]*pd[0].x + para[1]*pd[0].y);
        /* y row of the transform. */
        c1 = ps[1].y-ps[0].y;
        c2 = ps[2].y-ps[0].y;
        para[3] = (c1*b2 - c2*b1)/c;
        para[4] = (c2*a1 - c1*a2)/c;
        para[5] = ps[0].y - (para[3]*pd[0].x + para[4]*pd[0].y);

        /* Create or resize the three grid tables to the destination size. */
        if(handle->lx==NULL) handle->lx=mTableCreate(height,width,sizeof(short),NULL);
        else mTableRedefine(handle->lx,height,width,sizeof(short),NULL);
        if(handle->ly==NULL) handle->ly=mTableCreate(height,width,sizeof(short),NULL);
        else mTableRedefine(handle->ly,height,width,sizeof(short),NULL);
        if(handle->w ==NULL) handle->w =mTableCreate(height,width,sizeof(unsigned char),NULL);
        else mTableRedefine(handle->w ,height,width,sizeof(unsigned char),NULL);

        TransformGrid(src,Affine_x,Affine_y,para,handle->lx,handle->ly,handle->w);

        /* Remember what the grid was built for. */
        memcpy(handle->ps,ps,3*sizeof(MImagePoint));
        memcpy(handle->pd,pd,3*sizeof(MImagePoint));
        handle->height = height;
        handle->width = width;
    }
    GridInterpolation(src,dst,handle->lx,handle->ly,handle->w,mode);

    /* In-place case: move the result into src and drop the scratch image. */
    if(p!=dst)
    {
        mImageExchange(src,dst);
        mImageRelease(dst);
    }
    hdl->valid = 1;
}
// Rotate src by 90 degrees into dst; dst takes the transposed dimensions
// (dst(r,q) = src(cols-1-q, r), where cols is dst's width).
void ImageRotate90(MImage *src,MImage *dst)
{
    mImageRedefine(dst,src->channel,src->width,src->height,dst->data);
    int rows = dst->height;
    int cols = dst->width;
    for(int c=0;c<dst->channel;c++)
    {
        unsigned char **in  = src->data[c];
        unsigned char **out = dst->data[c];
        for(int r=0;r<rows;r++)
            for(int q=0;q<cols;q++)
                out[r][q] = in[cols-1-q][r];
    }
}
// Rotate src by 180 degrees into dst (same dimensions); every pixel moves
// to the diagonally opposite position.
void ImageRotate180(MImage *src,MImage *dst)
{
    mImageRedefine(dst,src->channel,src->height,src->width,dst->data);
    int rows = dst->height;
    int cols = dst->width;
    for(int c=0;c<dst->channel;c++)
    {
        unsigned char **in  = src->data[c];
        unsigned char **out = dst->data[c];
        for(int r=0;r<rows;r++)
            for(int q=0;q<cols;q++)
                out[r][q] = in[rows-1-r][cols-1-q];
    }
}
// Rotate src by 270 degrees into dst; dst takes the transposed dimensions
// (dst(r,q) = src(q, rows-1-r), where rows is dst's height).
void ImageRotate270(MImage *src,MImage *dst)
{
    mImageRedefine(dst,src->channel,src->width,src->height,dst->data);
    int rows = dst->height;
    int cols = dst->width;
    for(int c=0;c<dst->channel;c++)
    {
        unsigned char **in  = src->data[c];
        unsigned char **out = dst->data[c];
        for(int r=0;r<rows;r++)
            for(int q=0;q<cols;q++)
                out[r][q] = in[q][rows-1-r];
    }
}
// Cached state for mImageRotate: the sampling grid is rebuilt only when the
// anchors, angle, or output size change between calls.
struct HandleImageRotate
{
    int height;            // output height the cached grid was built for
    int width;             // output width the cached grid was built for
    MImagePoint src_hold;  // rotation centre in the source image
    MImagePoint dst_hold;  // anchor position in the output image
    float angle;           // rotation angle the cached grid was built for
    MTable *lx;            // per-pixel sampling grid (filled by TransformGrid)
    MTable *ly;            // per-pixel sampling grid (filled by TransformGrid)
    MTable *w;             // per-pixel interpolation weights (filled by TransformGrid)
};
// Handle destructor for mImageRotate: releases the cached grids.
void endImageRotate(void *handle)
{
    struct HandleImageRotate *info = (struct HandleImageRotate *)handle;
    // Fix: the original guarded the ly release with info->lx (copy-paste),
    // leaking ly or releasing a NULL table whenever lx and ly differed.
    if(info->lx != NULL) mTableRelease(info->lx);
    if(info->ly != NULL) mTableRelease(info->ly);
    if(info->w  != NULL) mTableRelease(info->w );
}
#define HASH_ImageRotate 0x35b8aedf
// Rotate src by 'angle' (degrees) into dst around the anchor src_hold
// (defaults to the source centre); dst_hold positions that anchor in the
// output (defaults to the output centre). mode is forwarded to
// GridInterpolation. Exact 90/180/270 rotations with matching anchors and
// sizes use the fast copy paths.
void mImageRotate(MImage *src,MImage *dst,MImagePoint *src_hold,MImagePoint *dst_hold,float angle,int mode)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid source image");
    if(INVALID_POINTER(dst)) dst=src;
    float scx,scy,dcx,dcy;
    // source-side rotation centre: explicit anchor or image centre
    if(src_hold == NULL) {scx = ((float)(src->width))/2.0f; scy = ((float)(src->height))/2.0f;}
    else {scx = src_hold->x; scy = src_hold->y; }
    int height=dst->height;int width=dst->width;
    // zero rotation degenerates to a crop centred on the anchor
    if(angle==0.0f)
    {
        mImageCut(src,dst,scx-width/2,scx+width/2,scy-height/2,scy+height/2);
        return;
    }
    float sn = mSin(angle);
    float cs = mCos(angle);
    MImage *p = dst;
    if(dst==src)
    {
        // in-place: size a temporary output to the rotated bounding box
        height = (int)(src->height*ABS(cs)+src->width*ABS(sn));
        width = (int)(src->height*ABS(sn)+src->width*ABS(cs));
        if(dst_hold != NULL)
        {
            float x =((scx+scx-src->width)*cs - (scy+scy-src->height)*sn + height)/2.0f;
            float y =((scx+scx-src->width)*sn + (scy+scy-src->height)*cs + width)/2.0f;
            // NOTE(review): width uses (x - dst_hold->x) but height uses
            // (y + dst_hold->y) — the sign asymmetry looks suspicious; confirm.
            width = width + (int)(x - dst_hold->x);
            height= height+ (int)(y + dst_hold->y);
        }
        dst = mImageCreate(src->channel,height,width,NULL);
    }
    else if((height<=0)||(width<=0))
    {
        // caller left dst unsized: same bounding-box sizing as above
        height = (int)(src->height*ABS(cs)+src->width*ABS(sn));
        width = (int)(src->height*ABS(sn)+src->width*ABS(cs));
        if(dst_hold != NULL)
        {
            float x =((scx+scx-src->width)*cs - (scy+scy-src->height)*sn + height)/2.0f;
            float y =((scx+scx-src->width)*sn + (scy+scy-src->height)*cs + width)/2.0f;
            width = width + (int)(x - dst_hold->x);
            height= height+ (int)(y + dst_hold->y);
        }
        mImageRedefine(dst,src->channel,height,width,dst->data);
    }
    else
        mImageRedefine(dst,src->channel,DFLT,DFLT,dst->data);
    mException(INVALID_IMAGE(dst),EXIT,"invalid error");
    // memcpy(&(dst->info),&(src->info),sizeof(MInfo));
    // destination-side anchor: explicit or output centre
    if(dst_hold == NULL) {dcx = ((float)width)/2.0f;dcy = ((float)height)/2.0f;}
    else {dcx = dst_hold->x; dcy = dst_hold->y; }
    // fast paths for exact quarter-turns with matching anchors and sizes
    if(angle == 90.0f)
    if((scx == dcy)&&(scy == dcx))
    if((src->width == height)&&(src->height == width))
    {ImageRotate90(src,dst);if(p!=dst){mImageExchange(src,dst);mImageRelease(dst);}return;}
    if(angle == 180.0f)
    if((scx == dcx)&&(scy == dcy))
    if((src->width == width)&&(src->height == height))
    {ImageRotate180(src,dst);if(p!=dst){mImageExchange(src,dst);mImageRelease(dst);}return;}
    if(angle == 270.0f)
    if((scx == dcy)&&(scy == dcx))
    if((src->width == height)&&(src->height == width))
    {ImageRotate270(src,dst);if(p!=dst){mImageExchange(src,dst);mImageRelease(dst);}return;}
    // cached sampling grid keyed on src; rebuilt when any parameter changed
    MHandle *hdl=mHandle(src,ImageRotate);
    struct HandleImageRotate *handle = (struct HandleImageRotate *)(hdl->handle);
    if((hdl->valid == 0)
    ||(handle->src_hold.x != scx)||(handle->src_hold.y != scy)
    ||(handle->dst_hold.x != dcx)||(handle->dst_hold.y != dcy)
    ||(handle->angle !=angle)
    ||(handle->height != height)
    ||(handle->width != width))
    {
        // rotation expressed as an affine map fed to TransformGrid
        float para[6];
        para[0] = cs;
        para[1] = sn;
        para[2] = scx - dcx*cs - dcy*sn;
        para[3] = 0-sn;
        para[4] = cs;
        para[5] = scy + dcx*sn - dcy*cs;
        if(handle->lx==NULL) handle->lx=mTableCreate(height,width,sizeof(short),NULL);
        else mTableRedefine(handle->lx,height,width,sizeof(short),NULL);
        if(handle->ly==NULL) handle->ly=mTableCreate(height,width,sizeof(short),NULL);
        else mTableRedefine(handle->ly,height,width,sizeof(short),NULL);
        if(handle->w ==NULL) handle->w =mTableCreate(height,width,sizeof(unsigned char),NULL);
        else mTableRedefine(handle->w ,height,width,sizeof(unsigned char),NULL);
        TransformGrid(src,Affine_x,Affine_y,para,handle->lx,handle->ly,handle->w);
        // remember the parameters that produced this grid
        handle->src_hold.x = scx; handle->src_hold.y = scy;
        handle->dst_hold.x = dcx; handle->dst_hold.y = dcy;
        handle->angle = angle;
        handle->height = height;
        handle->width = width;
    }
    GridInterpolation(src,dst,handle->lx,handle->ly,handle->w,mode);
    if(p!=dst)
    {
        // dst was a temporary: move the result into src's buffers and free it
        mImageExchange(src,dst);
        mImageRelease(dst);
    }
    hdl->valid = 1;
}
// Bilinearly sample Src at the floating-point coordinate (Sx,Sy) and store
// the result at Dst(Dx,Dy) for every channel. Coordinates outside Src yield
// 0; W11/W21/W12/W22 are the bilinear weights of the four neighbours.
#define TRANSFORM_VALUE(Src,Sx,Sy,Dst,Dx,Dy) {\
    int Y1,Y2,X1,X2;\
    float Wx,Wy;\
    float W11,W12,W21,W22;\
    int Cn;\
    \
    if((Sx<0)||(Sy<0)||(Sx>Src->width-1)||(Sy>Src->height-1))\
    {\
        for(Cn=0;Cn<dst->channel;Cn++)\
            Dst->data[Cn][Dy][Dx] = 0;\
    }\
    else\
    {\
        Y1 = (int)Sy;\
        Y2 = Y1+1;\
        Wy = Sy-(float)Y1;\
        \
        X1 = (int)Sx;\
        X2 = X1+1;\
        Wx = Sx-(float)X1;\
        \
        W22 = Wx*Wy;\
        W11 = 1.0f-Wx-Wy+W22;\
        W21 = Wx-W22;\
        W12 = Wy-W22;\
        \
        for(Cn=0;Cn<dst->channel;Cn++)\
            Dst->data[Cn][Dy][Dx] = Src->data[Cn][Y1][X1]*W11+Src->data[Cn][Y1][X2]*W21+Src->data[Cn][Y2][X1]*W12+Src->data[Cn][Y2][X2]*W22;\
    }\
}
/*
void mImageCoordinateTransform(MImage *src,MImage *dst,float (*x_func)(int,int),float (*y_func)(int,int))
{
int i,j,cn;
float lx,ly;
int x1,x2,y1,y2;
float wx,wy;
float w11,w12,w21,w22;
MImage *p;
mException(INVALID_IMAGE(src),"invalid input",EXIT);
p=dst;
if(INVALID_POINTER(dst)||(dst==src))
dst = mImageCreate(src->channel,src->height,src->width,NULL);
else
{
mException(INVALID_IMAGE(dst),"invalid input",EXIT);
mException((src->channel != dst->channel),"invalid input",EXIT);
}
for(j=0;j<dst->height;j++)
for(i=0;i<dst->width;i++)
{
lx = x_func(i,j);
ly = y_func(i,j);
y1 = (int)ly;
y2 = y1+1;
wy = ly-(float)y1;
x1 = (int)lx;
x2 = x1+1;
wx = lx-(float)x1;
w22 = wx*wy;
w11 = 1.0f-wx-wy+w22;
w21 = wx-w22;
w12 = wy-w22;
for(cn=0;cn<dst->channel;cn++)
dst->data[cn][j][i] = SRC(cn,x1,y1)*w11+SRC(cn,x2,y1)*w21+SRC(cn,x1,y2)*w12+SRC(cn,x2,y2)*w22;
}
if(p!=dst)
{
mImageExchange(src,dst);
mImageRelease(dst);
}
}
*/
// Local-warp template for ImageDeformation: for each covered pixel, the x/y
// matrices give the source sampling coordinate; locate_x/locate_y place the
// template on the image (negative values clip it at the border).
struct DeformationTemplate {
    int locate_x;  // template left edge in image coordinates (may be negative)
    int locate_y;  // template top edge in image coordinates (may be negative)
    MMatrix *x;    // source x coordinate per template cell (<=0 marks untouched)
    MMatrix *y;    // source y coordinate per template cell (<=0 marks untouched)
};
// Locally warp src into dst using the displacement template temp: every
// pixel covered by the template is bilinearly resampled from the source
// coordinate stored in temp->x/temp->y; untouched pixels are copied through.
// Fixes vs. the original: (1) ei/ej were computed from dst before dst was
// validated (NULL dereference when dst is an invalid pointer); (2) the
// per-channel backup copy advanced n across channels without resetting it,
// so only the first channel was copied; (3) the resampling loop never reset
// m per row and reused a stale n, reading tx/ty out of bounds.
void ImageDeformation(MImage *src,MImage *dst,struct DeformationTemplate *temp)
{
    int i,j,cn,m,n;
    float lx,ly;
    int x1,x2,y1,y2;
    float wx,wy;
    float w11,w12,w21,w22;
    MImage *p;
    MMatrix *tx,*ty;
    int si,ei,sj,ej;
    int m0,n0;
    mException(INVALID_IMAGE(src)||INVALID_POINTER(temp),EXIT,"invalid input");
    mException(((temp->locate_x>=src->width)||(temp->locate_y>=src->height)),EXIT,"invalid temp");
    tx = temp->x;
    ty = temp->y;
    // m0/n0: first usable template column/row; si/sj: first affected image column/row
    if(temp->locate_x <0) {m0=0-temp->locate_x; si=0;}
    else                  {m0=0;                si=temp->locate_x;}
    if(temp->locate_y <0) {n0=0-temp->locate_y; sj=0;}
    else                  {n0=0;                sj=temp->locate_y;}
    p=dst;
    if(INVALID_POINTER(dst)||(dst==src))
    {
        // in-place: dst aliases the caller's image and src becomes a scratch
        // copy of the affected region (the interpolation below reads src)
        dst = src;
        src = mImageCreate(dst->channel,dst->height,dst->width,NULL);
        ei = MIN(dst->width ,(si+tx->col));
        ej = MIN(dst->height,(sj+tx->row));
        for(cn=0;cn<dst->channel;cn++)
            for(j=sj,n=n0;(j<dst->height)&&(n<tx->row);j++,n++)
                memcpy(src->data[cn][j]+si,dst->data[cn][j]+si,(ei-si)*sizeof(unsigned char));
    }
    else
    {
        mException(INVALID_IMAGE(dst),EXIT,"invalid input");
        mException((src->channel != dst->channel)||(src->height!=dst->height)||(src->width!=dst->width),EXIT,"invalid input");
        ei = MIN(dst->width ,(si+tx->col));
        ej = MIN(dst->height,(sj+tx->row));
        // out-of-place: start from a full copy, then overwrite the region
        for(cn=0;cn<dst->channel;cn++)
            for(j=0;j<dst->height;j++)
                memcpy(dst->data[cn][j],src->data[cn][j],src->width*sizeof(unsigned char));
    }
    // bilinear resampling; (n,m) are re-based for every row so the template
    // indices stay inside tx/ty
    for(j=sj,n=n0;j<ej;j++,n++)
        for(i=si,m=m0;i<ei;i++,m++)
        {
            lx = tx->data[n][m];
            ly = ty->data[n][m];
            if((lx<=0.0f)||(ly<=0.0f))
                continue;  // non-positive coordinates mark untouched pixels
            y1 = (int)ly;
            y2 = y1+1;
            wy = ly-(float)y1;
            x1 = (int)lx;
            x2 = x1+1;
            wx = lx-(float)x1;
            w22 = wx*wy;
            w11 = 1.0f-wx-wy+w22;
            w21 = wx-w22;
            w12 = wy-w22;
            for(cn=0;cn<dst->channel;cn++)
                dst->data[cn][j][i] = (unsigned char)(SRC(cn,x1,y1)*w11+SRC(cn,x2,y1)*w21+SRC(cn,x1,y2)*w12+SRC(cn,x2,y2)*w22);
        }
    if(p!=dst)
    {
        // NOTE(review): reached only when the caller's dst was an invalid
        // (non-NULL-checked) pointer; mirrors the original exchange/release.
        mImageExchange(src,dst);
        mImageRelease(dst);
    }
}
// Radially deform the point (x_in,y_in) around the centre (cx,cy) within
// radius R with strength k; points at or beyond the radius get DFLT in both
// outputs to mark them as untouched.
void TemplateCacuate(int x_in,int y_in,int R,float k,float cx,float cy,int *x_out,int *y_out)
{
    float dx = x_in - cx;
    float dy = y_in - cy;
    float dist2 = dx*dx + dy*dy;
    if(dist2 >= (float)(R*R))
    {
        // outside the deformation circle: mark as untouched
        *x_out = DFLT;
        *y_out = DFLT;
        return;
    }
    float falloff = 1.0f - ((float)sqrt(dist2))/((float)R);
    falloff = falloff*falloff;             // quadratic decay toward the rim
    float gain = (k-1.0f)*falloff + 1.0f;  // 1.0 at the rim, k at the centre
    *x_out = (int)(cx + dx*gain);
    *y_out = (int)(cy + dy*gain);
}
/*
int GrtTemplate(struct DeformationTemplate *tep,MList *curve,int R,float k)
{
int height;
int width;
int i,j;
int x0,y0,x1,y1;
int n;
MImagePoint **point;
height = tep->x->row;
width = tep->x->col;
point = curve->data;
d1 = (point[0]->x)*(point[0]->x)+(point[0]->y)*(point[0]->y);
d2 = (point[1]->x)*(point[1]->x)+(point[1]->y)*(point[1]->y);
min = d1+d2;
min_index=0;
n=1;
while(n+1<curve->num)
{
d1 = d2;
d2 = (point[n+1]->x)*(point[n+1]->x)+(point[n+1]->y)*(point[n+1]->y);
if(d1+d2<min)
{
min = d1+d2;
min_index = n;
}
n=n+1;
}
TemplateCacuate(0,0,R,k,point[min_index]->x,point[min_index]->y,point[min_index+1]->x,point[min_index+1]->y,tep->x->data[0][0],tep->y->data[0][0]);
for(j=0;j<height;j=j+2)
{
for(i=0;i<width;i++)
{
while((n>0)&&(n<curve->num))
{
d1 =
*/
// Build the sampling grid (lx/ly: integer source coordinates, w: packed 4+4
// bit sub-pixel weights) that moves the control points src onto dst. A
// coarse 32x32 displacement grid is computed by inverse-distance weighting
// of the control-point displacements and then bilinearly interpolated to
// every output pixel.
// Fix vs. the original: the pixel loop's working variables (n, m, i) were
// the function-scope ints shared by all OpenMP threads — a data race under
// "#pragma omp parallel for". They are now loop-local (thread-private).
void mImageReshapeTemplate(MList *src,MList *dst,MTable *lx,MTable *ly,MTable *w)
{
    int height = w->row;
    int width = w->col;
    int x_step = (width+30)/31;   // coarse-grid cell size (ceil(width/31))
    int y_step = (height+30)/31;  // coarse-grid cell size (ceil(height/31))
    float area = (float)(x_step*y_step);
    int i,j,k,m,n;
    float x_locate[32][32];
    float y_locate[32][32];
    MImagePoint **ps = (MImagePoint **)(src->data);
    MImagePoint **pd = (MImagePoint **)(dst->data);
    #define DISTANCE(x1,y1,x2,y2) (float)(sqrt((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)))
    // coarse grid: inverse-distance weighted average of the displacements;
    // an exact hit on a control point uses that displacement directly
    for(n=0,j=0;n<32;n++,j+=y_step)
    for(m=0,i=0;m<32;m++,i+=x_step)
    {
        float sum = 0.0f;
        float dx = 0.0f;
        float dy = 0.0f;
        for(k=0;k<dst->num;k++)
        {
            float d = DISTANCE(i,j,pd[k]->x,pd[k]->y);
            if(d == 0.0f)
            {
                dx = (pd[k]->x - ps[k]->x);
                dy = (pd[k]->y - ps[k]->y);
                break;
            }
            sum += 1.0f/d;
            dx += (pd[k]->x - ps[k]->x)/d;
            dy += (pd[k]->y - ps[k]->y)/d;
        }
        if(k == dst->num)
        {
            dx = dx/sum;
            dy = dy/sum;
        }
        x_locate[n][m] = dx + i;
        y_locate[n][m] = dy + j;
    }
    // per-pixel grid: bilinear interpolation of the coarse grid; all loop
    // state is declared inside the parallel region so it is thread-private
    #pragma omp parallel for
    for(int jj=0;jj<height;jj++)
    {
        int gy = jj/y_step;
        float wy2 = (float)(jj%y_step);
        float wy1 = y_step - wy2;
        for(int ii=0;ii<width;ii++)
        {
            int gx = ii/x_step;
            float wx2 = (float)(ii%x_step);
            float wx1 = x_step-wx2;
            float x =((x_locate[gy  ][gx]*wx1 + x_locate[gy  ][gx+1]*wx2)*wy1
                    + (x_locate[gy+1][gx]*wx1 + x_locate[gy+1][gx+1]*wx2)*wy2)/area;
            float y =((y_locate[gy  ][gx]*wx1 + y_locate[gy  ][gx+1]*wx2)*wy1
                    + (y_locate[gy+1][gx]*wx1 + y_locate[gy+1][gx+1]*wx2)*wy2)/area;
            int wx = (int)x;
            int wy = (int)y;
            lx->dataS16[jj][ii] = wx;
            ly->dataS16[jj][ii] = wy;
            // quantize the fractional parts to 1/16 and pack them into a byte
            wx = 16-(int)((x-(float)wx)/0.0625f+0.5f);
            wy = 16-(int)((y-(float)wy)/0.0625f+0.5f);
            w->dataU8[jj][ii] = (wx<<4)+wy;
        }
    }
}
// Cached grids for mImageReshape; kept on the handle so repeated calls with
// the same output size avoid reallocating the tables.
struct HandleImageReshape
{
    MTable *lx;  // per-pixel source x coordinates (short)
    MTable *ly;  // per-pixel source y coordinates (short)
    MTable *w;   // per-pixel packed interpolation weights (unsigned char)
};
// Handle destructor for mImageReshape: releases the cached grids.
void endImageReshape(void *info)
{
    struct HandleImageReshape *handle = (struct HandleImageReshape *)info;
    // Fix: the original guarded the ly release with handle->lx (copy-paste),
    // leaking ly or releasing a NULL table whenever lx and ly differed.
    if(handle->lx != NULL) mTableRelease(handle->lx);
    if(handle->ly != NULL) mTableRelease(handle->ly);
    if(handle->w  != NULL) mTableRelease(handle->w );
}
#define HASH_ImageReshape 0xe21f102e
// Warp src into dst so that the control points src_point map onto dst_point
// (mesh warp built by mImageReshapeTemplate); mode is forwarded to
// GridInterpolation to select the interpolation.
void mImageReshape(MImage *src,MImage *dst,MList *src_point,MList *dst_point,int mode)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid source image");
    MImage *p = dst;
    // normalize dst: create a temporary when absent/aliased, (re)size when degenerate
    if(INVALID_POINTER(dst)||(dst==src))
        dst = mImageCreate(src->channel,src->height,src->width,NULL);
    else if((dst->height<=0)||(dst->width<=0))
        mImageRedefine(dst,src->channel,src->height,src->width,dst->data);
    else
        mImageRedefine(dst,src->channel,DFLT,DFLT,dst->data);
    // memcpy(&(dst->info),&(src->info),sizeof(MInfo));
    // handle keyed on dst holds the grid tables (the grid itself is rebuilt
    // on every call below, since the control points may change each time)
    MHandle *hdl=mHandle(dst,ImageReshape);
    struct HandleImageReshape *handle = (struct HandleImageReshape *)(hdl->handle);
    {
        if(handle->lx == NULL) handle->lx= mTableCreate(dst->height,dst->width,sizeof(short),NULL);
        else mTableRedefine(handle->lx,dst->height,dst->width,sizeof(short),NULL);
        if(handle->ly == NULL) handle->ly= mTableCreate(dst->height,dst->width,sizeof(short),NULL);
        else mTableRedefine(handle->ly,dst->height,dst->width,sizeof(short),NULL);
        if(handle-> w == NULL) handle->w = mTableCreate(dst->height,dst->width,sizeof(unsigned char),NULL);
        else mTableRedefine(handle->w ,dst->height,dst->width,sizeof(unsigned char),NULL);
    }
    mImageReshapeTemplate(src_point,dst_point,handle->lx,handle->ly,handle->w);
    GridInterpolation(src,dst,handle->lx,handle->ly,handle->w,mode);
    if(p!=dst)
    {
        // dst was a temporary: move the result into src's buffers and free it
        mImageExchange(src,dst);
        mImageRelease(dst);
    }
    hdl->valid = 1;
}
|
GB_unop__conj_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__conj_fc32_fc32)
// op(A') function: GB (_unop_tran__conj_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = conjf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = conjf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = conjf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CONJ || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = conjf (Ax [p]) for all entries p.  Generated kernel: do not edit
// by hand (see the file header).  Ab selects the bitmap case; nthreads
// controls the OpenMP schedule.
GrB_Info GB (_unop_apply__conj_fc32_fc32)
(
    GxB_FC32_t *Cx, // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab, // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry 0..anz-1 is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = conjf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = conjf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = conj (A'): transpose and apply the unary operator.  Generated kernel:
// do not edit by hand (see the file header).  The loop body is textually
// included from GB_unop_transpose.c and uses the GB_* macros defined above.
GrB_Info GB (_unop_tran__conj_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
matrix.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M AAA TTTTT RRRR IIIII X X %
% MM MM A A T R R I X X %
% M M M AAAAA T RRRR I X %
% M M A A T R R I X X %
% M M A A T R R IIIII X X %
% %
% %
% MagickCore Matrix Methods %
% %
% Software Design %
% Cristy %
% August 2007 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image-private.h"
#include "magick/matrix.h"
#include "magick/memory_.h"
#include "magick/pixel-private.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/thread-private.h"
#include "magick/utility.h"
/*
Typedef declaration.
*/
struct _MatrixInfo
{
  /* storage backing: MemoryCache, MapCache, or DiskCache */
  CacheType
    type;

  /* matrix geometry; stride is the element size in bytes */
  size_t
    columns,
    rows,
    stride;

  /* total backing size in bytes (columns*rows*stride) */
  MagickSizeType
    length;

  /* mapped: elements came from MapBlob rather than the heap;
     synchronize: honor MAGICK_SYNCHRONIZE (preallocate disk space) */
  MagickBooleanType
    mapped,
    synchronize;

  /* path of the disk cache file (DiskCache/MapCache) */
  char
    path[MaxTextExtent];

  /* file descriptor of the disk cache, or -1 */
  int
    file;

  /* element storage when memory- or map-backed */
  void
    *elements;

  /* serializes seek+read/write on platforms without pread/pwrite */
  SemaphoreInfo
    *semaphore;

  /* structure validity marker (MagickCoreSignature) */
  size_t
    signature;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMatrixInfo() allocates the MatrixInfo structure.
%
% The format of the AcquireMatrixInfo method is:
%
% MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows,
% const size_t stride,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: the matrix columns.
%
% o rows: the matrix rows.
%
% o stride: the matrix stride.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(SIGBUS)
/* Installed by SetMatrixExtent: a SIGBUS while touching the matrix cache
   (e.g. a mapped page that cannot be materialized) is fatal. */
static void MatrixSignalHandler(int status)
{
  ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache");
}
#endif
/*
  Write 'length' bytes from 'buffer' to the matrix backing file starting at
  'offset'.  Uses pwrite() when available; otherwise serializes lseek()+write()
  under the matrix semaphore.  Short writes are retried and EINTR is not
  fatal.  Returns the number of bytes written (possibly fewer than requested
  on error) or -1 when the initial seek fails.
*/
static inline MagickOffsetType WriteMatrixElements(
  const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) MAGICK_SSIZE_MAX));
#else
    count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) MAGICK_SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* real error: stop and report bytes written so far */
      }
  }
#if !defined(MAGICKCORE_HAVE_PWRITE)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}
/*
  Extend the matrix backing file to at least 'length' bytes by writing one
  byte at the final offset; optionally preallocates the disk space with
  posix_fallocate() when synchronize is set, and installs the SIGBUS handler
  used for map-backed caches.  Returns MagickTrue on success (including when
  the file is already large enough).
*/
static MagickBooleanType SetMatrixExtent(
  MatrixInfo *magick_restrict matrix_info,MagickSizeType length)
{
  MagickOffsetType
    count,
    extent,
    offset;

  /* reject lengths that do not fit in a signed offset */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    return(MagickTrue);  /* already large enough */
  extent=(MagickOffsetType) length-1;
  count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
  if (matrix_info->synchronize != MagickFalse)
    (void) posix_fallocate(matrix_info->file,offset+1,extent-offset);
#endif
#if defined(SIGBUS)
  (void) signal(SIGBUS,MatrixSignalHandler);
#endif
  return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}
/*
  Allocate a MatrixInfo of columns x rows elements of 'stride' bytes each.
  Storage is tried in order: heap memory, anonymous map, then a disk file
  (optionally memory-mapped).  Returns NULL (after raising an exception where
  applicable) on failure.
*/
MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns,
  const size_t rows,const size_t stride,ExceptionInfo *exception)
{
  char
    *synchronize;

  MagickBooleanType
    status;

  MatrixInfo
    *matrix_info;

  matrix_info=(MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info));
  if (matrix_info == (MatrixInfo *) NULL)
    return((MatrixInfo *) NULL);
  (void) memset(matrix_info,0,sizeof(*matrix_info));
  matrix_info->signature=MagickCoreSignature;
  matrix_info->columns=columns;
  matrix_info->rows=rows;
  matrix_info->stride=stride;
  matrix_info->semaphore=AllocateSemaphoreInfo();
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      matrix_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /* overflow check on columns*rows*stride.
     NOTE(review): divides by rows and stride — a zero rows or stride would
     fault here before the check can fail; confirm callers never pass zero. */
  matrix_info->length=(MagickSizeType) columns*rows*stride;
  if (matrix_info->columns != (size_t) (matrix_info->length/rows/stride))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'","matrix cache");
      return(DestroyMatrixInfo(matrix_info));
    }
  /* first choice: heap memory (or an anonymous mapping as fallback) */
  matrix_info->type=MemoryCache;
  status=AcquireMagickResource(AreaResource,matrix_info->length);
  if ((status != MagickFalse) &&
      (matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length)))
    {
      status=AcquireMagickResource(MemoryResource,matrix_info->length);
      if (status != MagickFalse)
        {
          matrix_info->mapped=MagickFalse;
          matrix_info->elements=AcquireMagickMemory((size_t)
            matrix_info->length);
          if (matrix_info->elements == NULL)
            {
              matrix_info->mapped=MagickTrue;
              matrix_info->elements=MapBlob(-1,IOMode,0,(size_t)
                matrix_info->length);
            }
          if (matrix_info->elements == (unsigned short *) NULL)
            RelinquishMagickResource(MemoryResource,matrix_info->length);
        }
    }
  /* fallback: a unique disk file, memory-mapped when resources allow */
  matrix_info->file=(-1);
  if (matrix_info->elements == (unsigned short *) NULL)
    {
      status=AcquireMagickResource(DiskResource,matrix_info->length);
      if (status == MagickFalse)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
            "CacheResourcesExhausted","`%s'","matrix cache");
          return(DestroyMatrixInfo(matrix_info));
        }
      matrix_info->type=DiskCache;
      matrix_info->file=AcquireUniqueFileResource(matrix_info->path);
      if (matrix_info->file == -1)
        return(DestroyMatrixInfo(matrix_info));
      status=AcquireMagickResource(MapResource,matrix_info->length);
      if (status != MagickFalse)
        {
          status=SetMatrixExtent(matrix_info,matrix_info->length);
          if (status != MagickFalse)
            matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0,
              (size_t) matrix_info->length);
          if (matrix_info->elements != NULL)
            matrix_info->type=MapCache;
          else
            RelinquishMagickResource(MapResource,matrix_info->length);
        }
    }
  return(matrix_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMagickMatrix() allocates and returns a matrix in the form of an
% array of pointers to an array of doubles, with all values pre-set to zero.
%
% This is used to generate the two-dimensional matrix and vectors required
% for the GaussJordanElimination() method below, solving some system of
% simultaneous equations.
%
% The format of the AcquireMagickMatrix method is:
%
% double **AcquireMagickMatrix(const size_t number_rows,
% const size_t size)
%
% A description of each parameter follows:
%
% o number_rows: the number pointers for the array of pointers
% (first dimension).
%
% o size: the size of the array of doubles each pointer points to
% (second dimension).
%
*/
/*
  Allocate a number_rows x size matrix as an array of row pointers, every
  element initialized to 0.0.  On any allocation failure all rows acquired
  so far are released and NULL is returned.
*/
MagickExport double **AcquireMagickMatrix(const size_t number_rows,
  const size_t size)
{
  double
    **rows;

  ssize_t
    r;

  rows=(double **) AcquireQuantumMemory(number_rows,sizeof(*rows));
  if (rows == (double **) NULL)
    return((double **) NULL);
  for (r=0; r < (ssize_t) number_rows; r++)
  {
    ssize_t
      c;

    rows[r]=(double *) AcquireQuantumMemory(size,sizeof(*rows[r]));
    if (rows[r] == (double *) NULL)
      {
        /* unwind: free the rows already allocated, then the pointer array */
        while (r-- > 0)
          rows[r]=(double *) RelinquishMagickMemory(rows[r]);
        rows=(double **) RelinquishMagickMemory(rows);
        return((double **) NULL);
      }
    for (c=0; c < (ssize_t) size; c++)
      rows[r][c]=0.0;
  }
  return(rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMatrixInfo() dereferences a matrix, deallocating memory associated
% with the matrix.
%
% The format of the DestroyImage method is:
%
% MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/*
  Release all resources held by a MatrixInfo (storage, backing file, disk
  path, semaphore) and free the structure.  Always returns NULL.
*/
MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
{
  assert(matrix_info != (MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(matrix_info->semaphore);
  switch (matrix_info->type)
  {
    case MemoryCache:
    {
      if (matrix_info->mapped == MagickFalse)
        matrix_info->elements=RelinquishMagickMemory(matrix_info->elements);
      else
        {
          (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
          matrix_info->elements=(unsigned short *) NULL;
        }
      RelinquishMagickResource(MemoryResource,matrix_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
      matrix_info->elements=NULL;
      RelinquishMagickResource(MapResource,matrix_info->length);
    }
    /* no break: a map cache also owns a backing disk file, so fall through
       to close it and release the disk resource */
    case DiskCache:
    {
      if (matrix_info->file != -1)
        (void) close(matrix_info->file);
      (void) RelinquishUniqueFileResource(matrix_info->path);
      RelinquishMagickResource(DiskResource,matrix_info->length);
      break;
    }
    default:
      break;
  }
  UnlockSemaphoreInfo(matrix_info->semaphore);
  DestroySemaphoreInfo(&matrix_info->semaphore);
  return((MatrixInfo *) RelinquishMagickMemory(matrix_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a u s s J o r d a n E l i m i n a t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussJordanElimination() returns a matrix in reduced row echelon form,
% while simultaneously reducing and thus solving the augmented results
% matrix.
%
% See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
% The format of the GaussJordanElimination method is:
%
% MagickBooleanType GaussJordanElimination(double **matrix,
% double **vectors,const size_t rank,const size_t number_vectors)
%
% A description of each parameter follows:
%
% o matrix: the matrix to be reduced, as an 'array of row pointers'.
%
% o vectors: the additional matrix augmenting the matrix for row reduction.
% Producing an 'array of column vectors'.
%
% o rank: The size of the matrix (both rows and columns). Also represents
% the number terms that need to be solved.
%
% o number_vectors: Number of vector columns, augmenting the above matrix.
% Usually 1, but can be more for more complex equation solving.
%
% Note that the 'matrix' is given as a 'array of row pointers' of rank size.
% That is values can be assigned as matrix[row][column] where 'row' is
% typically the equation, and 'column' is the term of the equation.
% That is the matrix is in the form of a 'row first array'.
%
% However 'vectors' is a 'array of column pointers' which can have any number
% of columns, with each column array the same 'rank' size as 'matrix'.
%
% This allows for simpler handling of the results, especially if only one
% column 'vector' is all that is required to produce the desired solution.
%
% For example, the 'vectors' can consist of a pointer to a simple array of
% doubles, when only one set of simultaneous equations is to be solved from
% the given set of coefficient weighted terms.
%
% double **matrix = AcquireMagickMatrix(8UL,8UL);
% double coefficients[8];
% ...
% GaussJordanElimination(matrix, &coefficients, 8UL, 1UL);
%
% However by specifying more 'columns' (as an 'array of vector columns', you
% can use this function to solve a set of 'separable' equations.
%
% For example a distortion function where u = U(x,y) v = V(x,y)
% And the functions U() and V() have separate coefficients, but are being
% generated from a common x,y->u,v data set.
%
% Another example is generation of a color gradient from a set of colors at
% specific coordinates, such as a list x,y -> r,g,b,a.
%
% You can also use the 'vectors' to generate an inverse of the given 'matrix'
% though as a 'column first array' rather than a 'row first array'. For
% details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
*/
/*
  Reduce 'matrix' (rank x rank, array of row pointers) to reduced row echelon
  form by Gauss-Jordan elimination with full pivoting, simultaneously solving
  the augmenting 'vectors' (number_vectors column arrays of length rank).
  Returns MagickFalse on a singular matrix or allocation failure.
  Fix vs. the original: the two early failure returns (degenerate pivot
  bookkeeping, singular pivot) leaked the pivots/rows/columns work arrays;
  they are now released on every exit path.
*/
MagickExport MagickBooleanType GaussJordanElimination(double **matrix,
  double **vectors,const size_t rank,const size_t number_vectors)
{
#define GaussJordanSwap(x,y) \
{ \
  if ((x) != (y)) \
    { \
      (x)+=(y); \
      (y)=(x)-(y); \
      (x)=(x)-(y); \
    } \
}

  double
    max,
    scale;

  ssize_t
    i,
    j,
    k;

  ssize_t
    column,
    *columns,
    *pivots,
    row,
    *rows;

  columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns));
  rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows));
  pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots));
  if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) ||
      (pivots == (ssize_t *) NULL))
    {
      if (pivots != (ssize_t *) NULL)
        pivots=(ssize_t *) RelinquishMagickMemory(pivots);
      if (columns != (ssize_t *) NULL)
        columns=(ssize_t *) RelinquishMagickMemory(columns);
      if (rows != (ssize_t *) NULL)
        rows=(ssize_t *) RelinquishMagickMemory(rows);
      return(MagickFalse);
    }
  (void) memset(columns,0,rank*sizeof(*columns));
  (void) memset(rows,0,rank*sizeof(*rows));
  (void) memset(pivots,0,rank*sizeof(*pivots));
  column=0;
  row=0;
  for (i=0; i < (ssize_t) rank; i++)
  {
    /* full pivoting: pick the largest remaining element as the pivot */
    max=0.0;
    for (j=0; j < (ssize_t) rank; j++)
      if (pivots[j] != 1)
        {
          for (k=0; k < (ssize_t) rank; k++)
            if (pivots[k] != 0)
              {
                if (pivots[k] > 1)
                  {
                    /* degenerate pivot bookkeeping: release the work arrays
                       before failing (the original leaked them here) */
                    pivots=(ssize_t *) RelinquishMagickMemory(pivots);
                    rows=(ssize_t *) RelinquishMagickMemory(rows);
                    columns=(ssize_t *) RelinquishMagickMemory(columns);
                    return(MagickFalse);
                  }
              }
            else
              if (fabs(matrix[j][k]) >= max)
                {
                  max=fabs(matrix[j][k]);
                  row=j;
                  column=k;
                }
        }
    pivots[column]++;
    if (row != column)
      {
        /* move the pivot onto the diagonal */
        for (k=0; k < (ssize_t) rank; k++)
          GaussJordanSwap(matrix[row][k],matrix[column][k]);
        for (k=0; k < (ssize_t) number_vectors; k++)
          GaussJordanSwap(vectors[k][row],vectors[k][column]);
      }
    rows[i]=row;
    columns[i]=column;
    if (matrix[column][column] == 0.0)
      {
        /* singularity: release the work arrays before failing (the original
           leaked them here) */
        pivots=(ssize_t *) RelinquishMagickMemory(pivots);
        rows=(ssize_t *) RelinquishMagickMemory(rows);
        columns=(ssize_t *) RelinquishMagickMemory(columns);
        return(MagickFalse);
      }
    /* normalize the pivot row ... */
    scale=PerceptibleReciprocal(matrix[column][column]);
    matrix[column][column]=1.0;
    for (j=0; j < (ssize_t) rank; j++)
      matrix[column][j]*=scale;
    for (j=0; j < (ssize_t) number_vectors; j++)
      vectors[j][column]*=scale;
    /* ... and eliminate the pivot column from every other row */
    for (j=0; j < (ssize_t) rank; j++)
      if (j != column)
        {
          scale=matrix[j][column];
          matrix[j][column]=0.0;
          for (k=0; k < (ssize_t) rank; k++)
            matrix[j][k]-=scale*matrix[column][k];
          for (k=0; k < (ssize_t) number_vectors; k++)
            vectors[k][j]-=scale*vectors[k][column];
        }
  }
  /* undo the column interchanges, in reverse order */
  for (j=(ssize_t) rank-1; j >= 0; j--)
    if (columns[j] != rows[j])
      for (i=0; i < (ssize_t) rank; i++)
        GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]);
  pivots=(ssize_t *) RelinquishMagickMemory(pivots);
  rows=(ssize_t *) RelinquishMagickMemory(rows);
  columns=(ssize_t *) RelinquishMagickMemory(columns);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x C o l u m n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixColumns() returns the number of columns in the matrix.
%
% The format of the GetMatrixColumns method is:
%
% size_t GetMatrixColumns(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/* Accessor: the number of columns the matrix was created with. */
MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info)
{
  assert(matrix_info != (MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  return(matrix_info->columns);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixElement() returns the specified element in the matrix.
%
% The format of the GetMatrixElement method is:
%
% MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix columns.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: return the matrix element in this buffer.
%
*/
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  /* Clamp x to the valid column range [0, columns-1] (replicate-edge). */
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  return(x < 0L ? 0L : x);
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  /* Clamp y to the valid row range [0, rows-1] (replicate-edge). */
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  return(y < 0L ? 0L : y);
}
/*
  Read `length` bytes of the disk-backed matrix cache, starting at byte
  `offset`, into `buffer`.  Returns the number of bytes actually read:
  equal to `length` on success, smaller on a short read/error, or
  (MagickOffsetType) -1 when the initial seek fails (non-pread path only).
*/
static inline MagickOffsetType ReadMatrixElements(
  const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  /* Without pread(), the lseek+read pair must be serialized against other
     threads sharing this file descriptor. */
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  /* Loop because read()/pread() may return fewer bytes than requested. */
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) MAGICK_SSIZE_MAX));
#else
    count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) MAGICK_SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        /* Retry only when interrupted by a signal; any other failure (or
           EOF) terminates the loop with a short count. */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
#if !defined(MAGICKCORE_HAVE_PREAD)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}
MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,void *value)
{
  MagickOffsetType
    count,
    i;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  /*
    Clamp the coordinates to the matrix extent (replicate-edge policy) and
    flatten them to a linear element index.
  */
  i=(MagickOffsetType) EdgeY(y,matrix_info->rows)*matrix_info->columns+
    EdgeX(x,matrix_info->columns);
  if (matrix_info->type == DiskCache)
    {
      /* Disk-resident cache: fetch one element from the backing file. */
      count=ReadMatrixElements(matrix_info,i*matrix_info->stride,
        matrix_info->stride,(unsigned char *) value);
      return(count == (MagickOffsetType) matrix_info->stride ? MagickTrue :
        MagickFalse);
    }
  /* Memory-resident cache: copy the element directly. */
  (void) memcpy(value,(unsigned char *) matrix_info->elements+i*
    matrix_info->stride,matrix_info->stride);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x R o w s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixRows() returns the number of rows in the matrix.
%
% The format of the GetMatrixRows method is:
%
% size_t GetMatrixRows(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info)
{
  /* Validate the matrix handle, then report its row count. */
  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  return matrix_info->rows;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e a s t S q u a r e s A d d T e r m s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LeastSquaresAddTerms() adds one set of terms and associate results to the
% given matrix and vectors for solving using least-squares function fitting.
%
% The format of the AcquireMagickMatrix method is:
%
% void LeastSquaresAddTerms(double **matrix,double **vectors,
% const double *terms,const double *results,const size_t rank,
% const size_t number_vectors);
%
% A description of each parameter follows:
%
% o matrix: the square matrix to add given terms/results to.
%
% o vectors: the result vectors to add terms/results to.
%
%    o terms: the pre-calculated terms (without the unknown coefficient
%             weights) that form the equation being added.
%
%    o results: the result(s) that should be generated from the given terms
%               weighted by the yet-to-be-solved coefficients.
%
% o rank: the rank or size of the dimensions of the square matrix.
% Also the length of vectors, and number of terms being added.
%
% o number_vectors: Number of result vectors, and number or results being
% added. Also represents the number of separable systems of equations
% that is being solved.
%
% Example of use...
%
% 2 dimensional Affine Equations (which are separable)
% c0*x + c2*y + c4*1 => u
% c1*x + c3*y + c5*1 => v
%
% double **matrix = AcquireMagickMatrix(3UL,3UL);
% double **vectors = AcquireMagickMatrix(2UL,3UL);
% double terms[3], results[2];
% ...
% for each given x,y -> u,v
% terms[0] = x;
% terms[1] = y;
% terms[2] = 1;
% results[0] = u;
% results[1] = v;
% LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL);
% ...
% if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) {
% c0 = vectors[0][0];
% c2 = vectors[0][1];
% c4 = vectors[0][2];
% c1 = vectors[1][0];
% c3 = vectors[1][1];
% c5 = vectors[1][2];
% }
% else
% printf("Matrix unsolvable\n);
% RelinquishMagickMatrix(matrix,3UL);
% RelinquishMagickMatrix(vectors,2UL);
%
*/
MagickExport void LeastSquaresAddTerms(double **matrix,double **vectors,
  const double *terms,const double *results,const size_t rank,
  const size_t number_vectors)
{
  ssize_t
    i,
    j;

  /*
    Accumulate one observation into the least-squares normal equations:
    matrix gains the outer product terms x terms, and each result vector
    gains its result scaled by the terms.  Every element receives exactly
    one fused addition, so the update order is immaterial.
  */
  for (i=0; i < (ssize_t) rank; i++)
    for (j=0; j < (ssize_t) rank; j++)
      matrix[i][j]+=terms[i]*terms[j];
  for (i=0; i < (ssize_t) number_vectors; i++)
    for (j=0; j < (ssize_t) rank; j++)
      vectors[i][j]+=results[i]*terms[j];
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a t r i x T o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MatrixToImage() returns a matrix as an image. The matrix elements must be
% of type double otherwise nonsense is returned.
%
% The format of the MatrixToImage method is:
%
% Image *MatrixToImage(const MatrixInfo *matrix_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    max_value,
    min_value,
    scale_factor,
    value;

  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Elements are interpreted as doubles; reject narrower strides. */
  if (matrix_info->stride < sizeof(double))
    return((Image *) NULL);
  /*
    Determine range of matrix.
  */
  (void) GetMatrixElement(matrix_info,0,0,&value);
  min_value=value;
  max_value=value;
  for (y=0; y < (ssize_t) matrix_info->rows; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) matrix_info->columns; x++)
    {
      /* Unreadable elements are simply skipped for range purposes. */
      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      if (value < min_value)
        min_value=value;
      else
        if (value > max_value)
          max_value=value;
    }
  }
  /*
    Choose a scale that maps [min,max] onto the quantum range; degenerate
    ranges (all zero, or a single constant value) are handled specially.
  */
  if ((min_value == 0.0) && (max_value == 0.0))
    scale_factor=0;
  else
    if (min_value == max_value)
      {
        scale_factor=(double) QuantumRange/min_value;
        min_value=0;
      }
    else
      scale_factor=(double) QuantumRange/(max_value-min_value);
  /*
    Convert matrix to image.
  */
  image=AcquireImage((ImageInfo *) NULL);
  image->columns=matrix_info->columns;
  image->rows=matrix_info->rows;
  image->colorspace=GRAYColorspace;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      value;

    PixelPacket
      *q;

    ssize_t
      x;

    /* A prior row failure makes the remaining iterations no-ops. */
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      /* Gray pixel: replicate the scaled sample into R, G, and B. */
      value=scale_factor*(value-min_value);
      q->red=ClampToQuantum(value);
      q->green=q->red;
      q->blue=q->red;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /* On any row failure the partially-built image is destroyed (NULL). */
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N u l l M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NullMatrix() sets all elements of the matrix to zero.
%
%  The format of the NullMatrix method is:
%
% MagickBooleanType *NullMatrix(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info)
{
  ssize_t
    count,
    i;

  unsigned char
    value;

  /*
    Zero every element of the matrix.  Returns MagickTrue on success, or
    MagickFalse if a disk-cache write fails.
  */
  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  if (matrix_info->type != DiskCache)
    {
      /* Memory-resident cache: clear it in a single call. */
      (void) memset(matrix_info->elements,0,(size_t)
        matrix_info->length);
      return(MagickTrue);
    }
  /*
    Disk cache: write exactly matrix_info->length zero bytes from the start
    of the backing file.  The previous implementation nested a full-length
    byte loop inside a per-row loop, writing rows*length bytes and growing
    the backing file far past the cache extent (`length` already counts
    every byte of the matrix).
  */
  value=0;
  (void) lseek(matrix_info->file,0,SEEK_SET);
  for (i=0; i < (ssize_t) matrix_info->length; i++)
  {
    count=write(matrix_info->file,&value,sizeof(value));
    if (count != (ssize_t) sizeof(value))
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e l i n q u i s h M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RelinquishMagickMatrix() frees the previously acquired matrix (array of
% pointers to arrays of doubles).
%
% The format of the RelinquishMagickMatrix method is:
%
% double **RelinquishMagickMatrix(double **matrix,
% const size_t number_rows)
%
% A description of each parameter follows:
%
% o matrix: the matrix to relinquish
%
% o number_rows: the first dimension of the acquired matrix (number of
% pointers)
%
*/
MagickExport double **RelinquishMagickMatrix(double **matrix,
  const size_t number_rows)
{
  ssize_t
    i;

  /*
    Release each row buffer, then the row-pointer array itself.  A NULL
    matrix is a no-op so callers may free unconditionally; the NULL result
    is returned for convenient pointer reset.
  */
  if (matrix != (double **) NULL)
    {
      for (i=0; i < (ssize_t) number_rows; i++)
        matrix[i]=(double *) RelinquishMagickMemory(matrix[i]);
      matrix=(double **) RelinquishMagickMemory(matrix);
    }
  return(matrix);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetMatrixElement() sets the specified element in the matrix.
%
% The format of the SetMatrixElement method is:
%
% MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix columns.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: set the matrix element to this value.
%
*/
MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,const void *value)
{
  MagickOffsetType
    count,
    i;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  /*
    Flatten (x,y) to a linear element index and reject offsets outside the
    matrix extent (unlike GetMatrixElement(), coordinates are not clamped).
  */
  i=(MagickOffsetType) y*matrix_info->columns+x;
  if ((i < 0) ||
      ((MagickSizeType) (i*matrix_info->stride) >= matrix_info->length))
    return(MagickFalse);
  if (matrix_info->type == DiskCache)
    {
      /* Disk-resident cache: persist one element to the backing file. */
      count=WriteMatrixElements(matrix_info,i*matrix_info->stride,
        matrix_info->stride,(unsigned char *) value);
      return(count == (MagickOffsetType) matrix_info->stride ? MagickTrue :
        MagickFalse);
    }
  /* Memory-resident cache: copy the element directly. */
  (void) memcpy((unsigned char *) matrix_info->elements+i*matrix_info->stride,
    value,matrix_info->stride);
  return(MagickTrue);
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y with tv_usec normalized into [0, 1000000).
 * Note: *y is modified during normalization (same as the original).
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
	/* Borrow whole seconds from y so x->tv_usec >= y->tv_usec. */
	if (x->tv_usec < y->tv_usec)
	{
		int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_usec -= 1000000 * borrow;
		y->tv_sec += borrow;
	}
	/* Carry any excess microseconds into y's seconds field. */
	if (x->tv_usec - y->tv_usec > 1000000)
	{
		int carry = (x->tv_usec - y->tv_usec) / 1000000;
		y->tv_usec += 1000000 * carry;
		y->tv_sec -= carry;
	}
	/* After normalization the usec difference is certainly positive. */
	result->tv_sec = x->tv_sec - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;
	return x->tv_sec < y->tv_sec ? 1 : 0;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 24;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,2);t1++) {
lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-11,12)),ceild(4*t2-Nz-20,24));t3<=min(min(min(floord(4*t2+Ny,24),floord(Nt+Ny-4,24)),floord(2*t1+Ny+1,24)),floord(4*t1-4*t2+Nz+Ny-1,24));t3++) {
for (t4=max(max(max(0,ceild(t1-127,128)),ceild(4*t2-Nz-252,256)),ceild(24*t3-Ny-252,256));t4<=min(min(min(min(floord(4*t2+Nx,256),floord(Nt+Nx-4,256)),floord(2*t1+Nx+1,256)),floord(24*t3+Nx+20,256)),floord(4*t1-4*t2+Nz+Nx-1,256));t4++) {
for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),24*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),24*t3+22),256*t4+254),4*t1-4*t2+Nz+1);t5++) {
for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
lbv=max(256*t4,t5+1);
ubv=min(256*t4+255,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
convolution_3x3_int8.h | // SenseNets is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2018 SenseNets Technology Ltd. All rights reserved.
// Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
static void conv3x3s1_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    // Repack the 3x3 kernels: groups of four output channels are interleaved
    // (four weights per tap) so the convolution kernel can load them with one
    // contiguous read; leftover channels keep the plain 9-per-input layout.
    kernel_tm.create(4*9, inch, outch/4 + outch%4, (size_t)1u);

    const signed char* kernel = _kernel;

    int p = 0;
    for (; p+3 < outch; p += 4)
    {
        const signed char* src[4];
        for (int m=0; m<4; m++)
            src[m] = kernel + (p+m)*inch*9;

        signed char* dst = kernel_tm.channel(p/4);

        for (int q=0; q<inch; q++)
        {
            // Interleave: tap k of channels p..p+3 become 4 adjacent bytes.
            for (int k=0; k<9; k++)
            {
                for (int m=0; m<4; m++)
                    dst[m] = src[m][k];
                dst += 4;
            }
            for (int m=0; m<4; m++)
                src[m] += 9;
        }
    }
    for (; p < outch; p++)
    {
        // Remainder channels: straight copy, 9 weights per input channel.
        const signed char* src = kernel + p*inch*9;
        signed char* dst = kernel_tm.channel(p/4 + p%4);

        for (int q=0; q<inch; q++)
        {
            for (int k=0; k<9; k++)
                dst[k] = src[k];
            dst += 9;
            src += 9;
        }
    }
}
#if __aarch64__
static void conv3x3s1_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const signed char* kernel = _kernel;
int nn_outch = outch >> 1;
int remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp < nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p+1);
out0.fill(0);
out1.fill(0);
const signed char* kernel0 = (const signed char *)kernel + p * inch * 9;
const signed char* kernel1 = (const signed char *)kernel + (p + 1) * inch * 9;
for (int q=0; q<inch; q++)
{
int* outptr0 = out0;
int* outptr1 = out1;
int* outptr0n = outptr0 + outw;
int* outptr1n = outptr1 + outw;
const signed char* img0 = bottom_blob.channel(q);
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w * 2;
const signed char* r3 = img0 + w * 3;
int i = 0;
int8x8_t _k00 = vdup_n_s8(kernel0[0]);
int8x8_t _k01 = vdup_n_s8(kernel0[1]);
int8x8_t _k02 = vdup_n_s8(kernel0[2]);
int8x8_t _k03 = vdup_n_s8(kernel0[3]);
int8x8_t _k04 = vdup_n_s8(kernel0[4]);
int8x8_t _k05 = vdup_n_s8(kernel0[5]);
int8x8_t _k06 = vdup_n_s8(kernel0[6]);
int8x8_t _k07 = vdup_n_s8(kernel0[7]);
int8x8_t _k08 = vdup_n_s8(kernel0[8]);
int8x8_t _k10 = vdup_n_s8(kernel1[0]);
int8x8_t _k11 = vdup_n_s8(kernel1[1]);
int8x8_t _k12 = vdup_n_s8(kernel1[2]);
int8x8_t _k13 = vdup_n_s8(kernel1[3]);
int8x8_t _k14 = vdup_n_s8(kernel1[4]);
int8x8_t _k15 = vdup_n_s8(kernel1[5]);
int8x8_t _k16 = vdup_n_s8(kernel1[6]);
int8x8_t _k17 = vdup_n_s8(kernel1[7]);
int8x8_t _k18 = vdup_n_s8(kernel1[8]);
for (; i+1 < outh; i+=2)
{
int nn = outw >> 3;
int remain = outw & 7;
for (; nn > 0; nn--)
{
// outch 0
int8x8_t _r0 = vld1_s8(r0);
int8x8_t _r0n = vld1_s8(r0+8);
int8x8_t _r01 = vext_s8(_r0, _r0n, 1);
int8x8_t _r02 = vext_s8(_r0, _r0n, 2);
int16x8_t _sum0 = vmull_s8(_r0, _k00);
_sum0 = vmlal_s8(_sum0, _r01, _k01);
_sum0 = vmlal_s8(_sum0, _r02, _k02);
int8x8_t _r1 = vld1_s8(r1);
int8x8_t _r1n = vld1_s8(r1+8);
int8x8_t _r11 = vext_s8(_r1, _r1n, 1);
int8x8_t _r12 = vext_s8(_r1, _r1n, 2);
_sum0 = vmlal_s8(_sum0, _r1, _k03);
_sum0 = vmlal_s8(_sum0, _r11, _k04);
_sum0 = vmlal_s8(_sum0, _r12, _k05);
int16x8_t _sum1 = vmull_s8(_r1, _k00);
_sum1 = vmlal_s8(_sum1, _r11, _k01);
_sum1 = vmlal_s8(_sum1, _r12, _k02);
int8x8_t _r2 = vld1_s8(r2);
int8x8_t _r2n = vld1_s8(r2+8);
int8x8_t _r21 = vext_s8(_r2, _r2n, 1);
int8x8_t _r22 = vext_s8(_r2, _r2n, 2);
_sum0 = vmlal_s8(_sum0, _r2, _k06);
_sum0 = vmlal_s8(_sum0, _r21, _k07);
_sum0 = vmlal_s8(_sum0, _r22, _k08);
_sum1 = vmlal_s8(_sum1, _r2, _k03);
_sum1 = vmlal_s8(_sum1, _r21, _k04);
_sum1 = vmlal_s8(_sum1, _r22, _k05);
int8x8_t _r3 = vld1_s8(r3);
int8x8_t _r3n = vld1_s8(r3+8);
int8x8_t _r31 = vext_s8(_r3, _r3n, 1);
int8x8_t _r32 = vext_s8(_r3, _r3n, 2);
_sum1 = vmlal_s8(_sum1, _r3, _k06);
_sum1 = vmlal_s8(_sum1, _r31, _k07);
_sum1 = vmlal_s8(_sum1, _r32, _k08);
int32x4_t sum0_s32 = vld1q_s32(outptr0);
int32x4_t sum0n_s32 = vld1q_s32(outptr0+4);
sum0_s32 = vaddw_s16(sum0_s32, vget_low_s16(_sum0));
sum0n_s32 = vaddw_s16(sum0n_s32, vget_high_s16(_sum0));
vst1q_s32(outptr0, sum0_s32);
vst1q_s32(outptr0+4, sum0n_s32);
int32x4_t sum1_s32 = vld1q_s32(outptr0n);
int32x4_t sum1n_s32 = vld1q_s32(outptr0n+4);
sum1_s32 = vaddw_s16(sum1_s32, vget_low_s16(_sum1));
sum1n_s32 = vaddw_s16(sum1n_s32, vget_high_s16(_sum1));
vst1q_s32(outptr0n, sum1_s32);
vst1q_s32(outptr0n+4, sum1n_s32);
// outch 1
_sum0 = vmull_s8(_r0, _k10);
_sum0 = vmlal_s8(_sum0, _r01, _k11);
_sum0 = vmlal_s8(_sum0, _r02, _k12);
_sum0 = vmlal_s8(_sum0, _r1, _k13);
_sum0 = vmlal_s8(_sum0, _r11, _k14);
_sum0 = vmlal_s8(_sum0, _r12, _k15);
_sum0 = vmlal_s8(_sum0, _r2, _k16);
_sum0 = vmlal_s8(_sum0, _r21, _k17);
_sum0 = vmlal_s8(_sum0, _r22, _k18);
_sum1 = vmull_s8(_r1, _k10);
_sum1 = vmlal_s8(_sum1, _r11, _k11);
_sum1 = vmlal_s8(_sum1, _r12, _k12);
_sum1 = vmlal_s8(_sum1, _r2, _k13);
_sum1 = vmlal_s8(_sum1, _r21, _k14);
_sum1 = vmlal_s8(_sum1, _r22, _k15);
_sum1 = vmlal_s8(_sum1, _r3, _k16);
_sum1 = vmlal_s8(_sum1, _r31, _k17);
_sum1 = vmlal_s8(_sum1, _r32, _k18);
sum0_s32 = vld1q_s32(outptr1);
sum0n_s32 = vld1q_s32(outptr1+4);
sum0_s32 = vaddw_s16(sum0_s32, vget_low_s16(_sum0));
sum0n_s32 = vaddw_s16(sum0n_s32, vget_high_s16(_sum0));
vst1q_s32(outptr1, sum0_s32);
vst1q_s32(outptr1+4, sum0n_s32);
sum1_s32 = vld1q_s32(outptr1n);
sum1n_s32 = vld1q_s32(outptr1n+4);
sum1_s32 = vaddw_s16(sum1_s32, vget_low_s16(_sum1));
sum1n_s32 = vaddw_s16(sum1n_s32, vget_high_s16(_sum1));
vst1q_s32(outptr1n, sum1_s32);
vst1q_s32(outptr1n+4, sum1n_s32);
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
outptr0 += 8;
outptr1 += 8;
outptr0n += 8;
outptr1n += 8;
}
for (; remain>0; remain--)
{
int sum0 = 0;
int sum0n = 0;
int sum1 = 0;
int sum1n = 0;
//ToDo Neon
sum0 += (int)r0[0] * kernel0[0];
sum0 += (int)r0[1] * kernel0[1];
sum0 += (int)r0[2] * kernel0[2];
sum0 += (int)r1[0] * kernel0[3];
sum0 += (int)r1[1] * kernel0[4];
sum0 += (int)r1[2] * kernel0[5];
sum0 += (int)r2[0] * kernel0[6];
sum0 += (int)r2[1] * kernel0[7];
sum0 += (int)r2[2] * kernel0[8];
sum1 += (int)r0[0] * kernel1[0];
sum1 += (int)r0[1] * kernel1[1];
sum1 += (int)r0[2] * kernel1[2];
sum1 += (int)r1[0] * kernel1[3];
sum1 += (int)r1[1] * kernel1[4];
sum1 += (int)r1[2] * kernel1[5];
sum1 += (int)r2[0] * kernel1[6];
sum1 += (int)r2[1] * kernel1[7];
sum1 += (int)r2[2] * kernel1[8];
sum0n += (int)r1[0] * kernel0[0];
sum0n += (int)r1[1] * kernel0[1];
sum0n += (int)r1[2] * kernel0[2];
sum0n += (int)r2[0] * kernel0[3];
sum0n += (int)r2[1] * kernel0[4];
sum0n += (int)r2[2] * kernel0[5];
sum0n += (int)r3[0] * kernel0[6];
sum0n += (int)r3[1] * kernel0[7];
sum0n += (int)r3[2] * kernel0[8];
sum1n += (int)r1[0] * kernel1[0];
sum1n += (int)r1[1] * kernel1[1];
sum1n += (int)r1[2] * kernel1[2];
sum1n += (int)r2[0] * kernel1[3];
sum1n += (int)r2[1] * kernel1[4];
sum1n += (int)r2[2] * kernel1[5];
sum1n += (int)r3[0] * kernel1[6];
sum1n += (int)r3[1] * kernel1[7];
sum1n += (int)r3[2] * kernel1[8];
*outptr0 += sum0;
*outptr1 += sum1;
*outptr0n += sum0n;
*outptr1n += sum1n;
r0++;
r1++;
r2++;
r3++;
outptr0++;
outptr1++;
outptr0n++;
outptr1n++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr0 += outw;
outptr1 += outw;
outptr0n += outw;
outptr1n += outw;
}
for (; i < outh; i++)
{
int nn = outw >> 3;
int remain = outw & 7;
for (; nn > 0; nn--)
{
// outch 0
int8x8_t _r0 = vld1_s8(r0);
int8x8_t _r0n = vld1_s8(r0+8);
int8x8_t _r01 = vext_s8(_r0, _r0n, 1);
int8x8_t _r02 = vext_s8(_r0, _r0n, 2);
int16x8_t _sum0 = vmull_s8(_r0, _k00);
_sum0 = vmlal_s8(_sum0, _r01, _k01);
_sum0 = vmlal_s8(_sum0, _r02, _k02);
int8x8_t _r1 = vld1_s8(r1);
int8x8_t _r1n = vld1_s8(r1+8);
int8x8_t _r11 = vext_s8(_r1, _r1n, 1);
int8x8_t _r12 = vext_s8(_r1, _r1n, 2);
_sum0 = vmlal_s8(_sum0, _r1, _k03);
_sum0 = vmlal_s8(_sum0, _r11, _k04);
_sum0 = vmlal_s8(_sum0, _r12, _k05);
int8x8_t _r2 = vld1_s8(r2);
int8x8_t _r2n = vld1_s8(r2+8);
int8x8_t _r21 = vext_s8(_r2, _r2n, 1);
int8x8_t _r22 = vext_s8(_r2, _r2n, 2);
_sum0 = vmlal_s8(_sum0, _r2, _k06);
_sum0 = vmlal_s8(_sum0, _r21, _k07);
_sum0 = vmlal_s8(_sum0, _r22, _k08);
int32x4_t sum0_s32 = vld1q_s32(outptr0);
int32x4_t sum0n_s32 = vld1q_s32(outptr0+4);
sum0_s32 = vaddw_s16(sum0_s32, vget_low_s16(_sum0));
sum0n_s32 = vaddw_s16(sum0n_s32, vget_high_s16(_sum0));
vst1q_s32(outptr0, sum0_s32);
vst1q_s32(outptr0+4, sum0n_s32);
// outch 1
_sum0 = vmull_s8(_r0, _k10);
_sum0 = vmlal_s8(_sum0, _r01, _k11);
_sum0 = vmlal_s8(_sum0, _r02, _k12);
_sum0 = vmlal_s8(_sum0, _r1, _k13);
_sum0 = vmlal_s8(_sum0, _r11, _k14);
_sum0 = vmlal_s8(_sum0, _r12, _k15);
_sum0 = vmlal_s8(_sum0, _r2, _k16);
_sum0 = vmlal_s8(_sum0, _r21, _k17);
_sum0 = vmlal_s8(_sum0, _r22, _k18);
sum0_s32 = vld1q_s32(outptr1);
sum0n_s32 = vld1q_s32(outptr1+4);
sum0_s32 = vaddw_s16(sum0_s32, vget_low_s16(_sum0));
sum0n_s32 = vaddw_s16(sum0n_s32, vget_high_s16(_sum0));
vst1q_s32(outptr1, sum0_s32);
vst1q_s32(outptr1+4, sum0n_s32);
r0 += 8;
r1 += 8;
r2 += 8;
outptr0 += 8;
outptr1 += 8;
}
for (; remain>0; remain--)
{
int sum0 = 0;
int sum1 = 0;
sum0 += (int)r0[0] * kernel0[0];
sum0 += (int)r0[1] * kernel0[1];
sum0 += (int)r0[2] * kernel0[2];
sum0 += (int)r1[0] * kernel0[3];
sum0 += (int)r1[1] * kernel0[4];
sum0 += (int)r1[2] * kernel0[5];
sum0 += (int)r2[0] * kernel0[6];
sum0 += (int)r2[1] * kernel0[7];
sum0 += (int)r2[2] * kernel0[8];
sum1 += (int)r0[0] * kernel1[0];
sum1 += (int)r0[1] * kernel1[1];
sum1 += (int)r0[2] * kernel1[2];
sum1 += (int)r1[0] * kernel1[3];
sum1 += (int)r1[1] * kernel1[4];
sum1 += (int)r1[2] * kernel1[5];
sum1 += (int)r2[0] * kernel1[6];
sum1 += (int)r2[1] * kernel1[7];
sum1 += (int)r2[2] * kernel1[8];
*outptr0 += sum0;
*outptr1 += sum1;
r0++;
r1++;
r2++;
outptr0++;
outptr1++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
kernel0 += 9;
kernel1 += 9;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
out0.fill(0);
const signed char* kernel0 = (const signed char *)kernel + p * inch * 9;
for (int q=0; q<inch; q++)
{
int* outptr0 = out0;
int* outptr0n = outptr0 + outw;
const signed char* img0 = bottom_blob.channel(q);
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w * 2;
const signed char* r3 = img0 + w * 3;
int i = 0;
int8x8_t _k00 = vdup_n_s8(kernel0[0]);
int8x8_t _k01 = vdup_n_s8(kernel0[1]);
int8x8_t _k02 = vdup_n_s8(kernel0[2]);
int8x8_t _k03 = vdup_n_s8(kernel0[3]);
int8x8_t _k04 = vdup_n_s8(kernel0[4]);
int8x8_t _k05 = vdup_n_s8(kernel0[5]);
int8x8_t _k06 = vdup_n_s8(kernel0[6]);
int8x8_t _k07 = vdup_n_s8(kernel0[7]);
int8x8_t _k08 = vdup_n_s8(kernel0[8]);
for (; i+1 < outh; i+=2)
{
int nn = outw >> 3;
int remain = outw & 7;
for (; nn > 0; nn--)
{
int8x8_t _r0 = vld1_s8(r0);
int8x8_t _r0n = vld1_s8(r0+8);
int8x8_t _r01 = vext_s8(_r0, _r0n, 1);
int8x8_t _r02 = vext_s8(_r0, _r0n, 2);
int16x8_t _sum0 = vmull_s8(_r0, _k00);
_sum0 = vmlal_s8(_sum0, _r01, _k01);
_sum0 = vmlal_s8(_sum0, _r02, _k02);
int8x8_t _r1 = vld1_s8(r1);
int8x8_t _r1n = vld1_s8(r1+8);
int8x8_t _r11 = vext_s8(_r1, _r1n, 1);
int8x8_t _r12 = vext_s8(_r1, _r1n, 2);
_sum0 = vmlal_s8(_sum0, _r1, _k03);
_sum0 = vmlal_s8(_sum0, _r11, _k04);
_sum0 = vmlal_s8(_sum0, _r12, _k05);
int16x8_t _sum1 = vmull_s8(_r1, _k00);
_sum1 = vmlal_s8(_sum1, _r11, _k01);
_sum1 = vmlal_s8(_sum1, _r12, _k02);
int8x8_t _r2 = vld1_s8(r2);
int8x8_t _r2n = vld1_s8(r2+8);
int8x8_t _r21 = vext_s8(_r2, _r2n, 1);
int8x8_t _r22 = vext_s8(_r2, _r2n, 2);
_sum0 = vmlal_s8(_sum0, _r2, _k06);
_sum0 = vmlal_s8(_sum0, _r21, _k07);
_sum0 = vmlal_s8(_sum0, _r22, _k08);
_sum1 = vmlal_s8(_sum1, _r2, _k03);
_sum1 = vmlal_s8(_sum1, _r21, _k04);
_sum1 = vmlal_s8(_sum1, _r22, _k05);
int8x8_t _r3 = vld1_s8(r3);
int8x8_t _r3n = vld1_s8(r3+8);
int8x8_t _r31 = vext_s8(_r3, _r3n, 1);
int8x8_t _r32 = vext_s8(_r3, _r3n, 2);
_sum1 = vmlal_s8(_sum1, _r3, _k06);
_sum1 = vmlal_s8(_sum1, _r31, _k07);
_sum1 = vmlal_s8(_sum1, _r32, _k08);
int32x4_t sum0_s32 = vld1q_s32(outptr0);
int32x4_t sum0n_s32 = vld1q_s32(outptr0+4);
sum0_s32 = vaddw_s16(sum0_s32, vget_low_s16(_sum0));
sum0n_s32 = vaddw_s16(sum0n_s32, vget_high_s16(_sum0));
vst1q_s32(outptr0, sum0_s32);
vst1q_s32(outptr0+4, sum0n_s32);
int32x4_t sum1_s32 = vld1q_s32(outptr0n);
int32x4_t sum1n_s32 = vld1q_s32(outptr0n+4);
sum1_s32 = vaddw_s16(sum1_s32, vget_low_s16(_sum1));
sum1n_s32 = vaddw_s16(sum1n_s32, vget_high_s16(_sum1));
vst1q_s32(outptr0n, sum1_s32);
vst1q_s32(outptr0n+4, sum1n_s32);
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
outptr0 += 8;
outptr0n += 8;
}
for (; remain>0; remain--)
{
// Todo neon
int sum0 = 0;
int sum0n = 0;
sum0 += (int)r0[0] * kernel0[0];
sum0 += (int)r0[1] * kernel0[1];
sum0 += (int)r0[2] * kernel0[2];
sum0 += (int)r1[0] * kernel0[3];
sum0 += (int)r1[1] * kernel0[4];
sum0 += (int)r1[2] * kernel0[5];
sum0 += (int)r2[0] * kernel0[6];
sum0 += (int)r2[1] * kernel0[7];
sum0 += (int)r2[2] * kernel0[8];
sum0n += (int)r1[0] * kernel0[0];
sum0n += (int)r1[1] * kernel0[1];
sum0n += (int)r1[2] * kernel0[2];
sum0n += (int)r2[0] * kernel0[3];
sum0n += (int)r2[1] * kernel0[4];
sum0n += (int)r2[2] * kernel0[5];
sum0n += (int)r3[0] * kernel0[6];
sum0n += (int)r3[1] * kernel0[7];
sum0n += (int)r3[2] * kernel0[8];
*outptr0 += sum0;
*outptr0n += sum0n;
r0++;
r1++;
r2++;
r3++;
outptr0++;
outptr0n++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr0 += outw;
outptr0n += outw;
}
for (; i < outh; i++)
{
int nn = outw >> 3;
int remain = outw & 7;
for (; nn > 0; nn--)
{
int8x8_t _r0 = vld1_s8(r0);
int8x8_t _r0n = vld1_s8(r0+8);
int8x8_t _r01 = vext_s8(_r0, _r0n, 1);
int8x8_t _r02 = vext_s8(_r0, _r0n, 2);
int16x8_t _sum0 = vmull_s8(_r0, _k00);
_sum0 = vmlal_s8(_sum0, _r01, _k01);
_sum0 = vmlal_s8(_sum0, _r02, _k02);
int8x8_t _r1 = vld1_s8(r1);
int8x8_t _r1n = vld1_s8(r1+8);
int8x8_t _r11 = vext_s8(_r1, _r1n, 1);
int8x8_t _r12 = vext_s8(_r1, _r1n, 2);
_sum0 = vmlal_s8(_sum0, _r1, _k03);
_sum0 = vmlal_s8(_sum0, _r11, _k04);
_sum0 = vmlal_s8(_sum0, _r12, _k05);
int8x8_t _r2 = vld1_s8(r2);
int8x8_t _r2n = vld1_s8(r2+8);
int8x8_t _r21 = vext_s8(_r2, _r2n, 1);
int8x8_t _r22 = vext_s8(_r2, _r2n, 2);
_sum0 = vmlal_s8(_sum0, _r2, _k06);
_sum0 = vmlal_s8(_sum0, _r21, _k07);
_sum0 = vmlal_s8(_sum0, _r22, _k08);
int32x4_t sum0_s32 = vld1q_s32(outptr0);
int32x4_t sum0n_s32 = vld1q_s32(outptr0+4);
sum0_s32 = vaddw_s16(sum0_s32, vget_low_s16(_sum0));
sum0n_s32 = vaddw_s16(sum0n_s32, vget_high_s16(_sum0));
vst1q_s32(outptr0, sum0_s32);
vst1q_s32(outptr0+4, sum0n_s32);
r0 += 8;
r1 += 8;
r2 += 8;
outptr0 += 8;
}
for (; remain>0; remain--)
{
int sum0 = 0;
sum0 += (int)r0[0] * kernel0[0];
sum0 += (int)r0[1] * kernel0[1];
sum0 += (int)r0[2] * kernel0[2];
sum0 += (int)r1[0] * kernel0[3];
sum0 += (int)r1[1] * kernel0[4];
sum0 += (int)r1[2] * kernel0[5];
sum0 += (int)r2[0] * kernel0[6];
sum0 += (int)r2[1] * kernel0[7];
sum0 += (int)r2[2] * kernel0[8];
*outptr0 += sum0;
r0++;
r1++;
r2++;
outptr0++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
kernel0 += 9;
}
}
}
static void conv3x3s2_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
    // Stride-2 3x3 int8 convolution, aarch64 path.
    //
    // bottom_blob : int8 input, one w x h plane per input channel
    // top_blob    : int32 accumulator output, one outw x outh plane per output
    //               channel; zero-filled here, then accumulated (+=) over all
    //               input channels
    // _kernel     : int8 weights, 9 values per (out-channel, in-channel) pair,
    //               laid out [outch][inch][9] contiguously
    //
    // Output channels are processed four at a time with inline asm; leftover
    // channels go through the intrinsics path at the bottom.
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // horizontal stride 2 consumes 2*outw input bytes per output row; skip the
    // rest of that row plus one full row (vertical stride 2): 2*w - 2*outw
    const int tailstep = w - 2 * outw + w;

    const signed char* kernel = _kernel;

    int nn_outch = outch >> 2;
    int remain_outch_start = nn_outch << 2;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp=0; pp < nn_outch; pp++)
    {
        int p = pp * 4;

        Mat out0 = top_blob.channel(p);
        Mat out1 = top_blob.channel(p + 1);
        Mat out2 = top_blob.channel(p + 2);
        Mat out3 = top_blob.channel(p + 3);

        // int32 planes: use the integer fill overload (was fill(0.f) -- same
        // bit pattern, but misleading and inconsistent with the sibling
        // kernels in this file, which use fill(0))
        out0.fill(0);
        out1.fill(0);
        out2.fill(0);
        out3.fill(0);

        const signed char* kernel0 = (const signed char*)kernel + p * inch * 9;
        const signed char* kernel1 = (const signed char*)kernel + (p + 1) * inch * 9;
        const signed char* kernel2 = (const signed char*)kernel + (p + 2) * inch * 9;
        const signed char* kernel3 = (const signed char*)kernel + (p + 3) * inch * 9;

        for (int q=0; q<inch; q++)
        {
            int* outptr0 = out0;
            int* outptr1 = out1;
            int* outptr2 = out2;
            int* outptr3 = out3;

            const signed char* img0 = bottom_blob.channel(q);

            const signed char* r0 = img0;
            const signed char* r1 = img0 + w;
            const signed char* r2 = img0 + w * 2;

            int i = 0;

            // NOTE(review): vld1q_s8 reads 16 bytes of the 9 valid weights;
            // assumes the weight buffer is padded past each 9-tap group --
            // confirm against the kernel packing done by the caller.
            int8x16_t _k0 = vld1q_s8(kernel0);
            int8x16_t _k1 = vld1q_s8(kernel1);
            int8x16_t _k2 = vld1q_s8(kernel2);
            int8x16_t _k3 = vld1q_s8(kernel3);

            for (; i < outh; i++)
            {
                int nn = outw >> 3;
                int remain = outw & 7;

                if (nn > 0)
                {
                    // 8 outputs per iteration for 4 output channels at once.
                    // ld2 de-interleaves even/odd input bytes (stride 2);
                    // ext builds the +2 taps; smull/smlal widen s8*s8 -> s16;
                    // saddw/saddw2 accumulate into the int32 output planes.
                    asm volatile(
                        "0: \n"
                        // r0
                        "prfm pldl1keep, [%5, #128] \n"
                        "ld2 {v4.8b, v5.8b}, [%5], #16 \n"
                        "ld2 {v6.8b, v7.8b}, [%5] \n"
                        "ext v8.8b, v4.8b, v6.8b, #1 \n"
                        "dup v9.8b, %16.b[0] \n"
                        "dup v10.8b, %17.b[0] \n"
                        "dup v11.8b, %18.b[0] \n"
                        "dup v12.8b, %19.b[0] \n"
                        "smull v13.8h, v4.8b, v9.8b \n"
                        "smull v14.8h, v4.8b, v10.8b \n"
                        "smull v15.8h, v4.8b, v11.8b \n"
                        "smull v16.8h, v4.8b, v12.8b \n"
                        "dup v9.8b, %16.b[1] \n"
                        "dup v10.8b, %17.b[1] \n"
                        "dup v11.8b, %18.b[1] \n"
                        "dup v12.8b, %19.b[1] \n"
                        "smlal v13.8h, v5.8b, v9.8b \n"
                        "smlal v14.8h, v5.8b, v10.8b \n"
                        "smlal v15.8h, v5.8b, v11.8b \n"
                        "smlal v16.8h, v5.8b, v12.8b \n"
                        "dup v9.8b, %16.b[2] \n"
                        "dup v10.8b, %17.b[2] \n"
                        "dup v11.8b, %18.b[2] \n"
                        "dup v12.8b, %19.b[2] \n"
                        "smlal v13.8h, v8.8b, v9.8b \n"
                        "smlal v14.8h, v8.8b, v10.8b \n"
                        "smlal v15.8h, v8.8b, v11.8b \n"
                        "smlal v16.8h, v8.8b, v12.8b \n"
                        // r1
                        "prfm pldl1keep, [%6, #128] \n"
                        "ld2 {v4.8b, v5.8b}, [%6], #16 \n"
                        "ld2 {v6.8b, v7.8b}, [%6] \n"
                        "ext v8.8b, v4.8b, v6.8b, #1 \n"
                        "dup v9.8b, %16.b[3] \n"
                        "dup v10.8b, %17.b[3] \n"
                        "dup v11.8b, %18.b[3] \n"
                        "dup v12.8b, %19.b[3] \n"
                        "smlal v13.8h, v4.8b, v9.8b \n"
                        "smlal v14.8h, v4.8b, v10.8b \n"
                        "smlal v15.8h, v4.8b, v11.8b \n"
                        "smlal v16.8h, v4.8b, v12.8b \n"
                        "dup v9.8b, %16.b[4] \n"
                        "dup v10.8b, %17.b[4] \n"
                        "dup v11.8b, %18.b[4] \n"
                        "dup v12.8b, %19.b[4] \n"
                        "smlal v13.8h, v5.8b, v9.8b \n"
                        "smlal v14.8h, v5.8b, v10.8b \n"
                        "smlal v15.8h, v5.8b, v11.8b \n"
                        "smlal v16.8h, v5.8b, v12.8b \n"
                        "dup v9.8b, %16.b[5] \n"
                        "dup v10.8b, %17.b[5] \n"
                        "dup v11.8b, %18.b[5] \n"
                        "dup v12.8b, %19.b[5] \n"
                        "smlal v13.8h, v8.8b, v9.8b \n"
                        "smlal v14.8h, v8.8b, v10.8b \n"
                        "smlal v15.8h, v8.8b, v11.8b \n"
                        "smlal v16.8h, v8.8b, v12.8b \n"
                        // r2
                        "prfm pldl1keep, [%7, #128] \n"
                        "ld2 {v4.8b, v5.8b}, [%7], #16 \n"
                        "ld2 {v6.8b, v7.8b}, [%7] \n"
                        "ext v8.8b, v4.8b, v6.8b, #1 \n"
                        "dup v9.8b, %16.b[6] \n"
                        "dup v10.8b, %17.b[6] \n"
                        "dup v11.8b, %18.b[6] \n"
                        "dup v12.8b, %19.b[6] \n"
                        "smlal v13.8h, v4.8b, v9.8b \n"
                        "smlal v14.8h, v4.8b, v10.8b \n"
                        "smlal v15.8h, v4.8b, v11.8b \n"
                        "smlal v16.8h, v4.8b, v12.8b \n"
                        "dup v9.8b, %16.b[7] \n"
                        "dup v10.8b, %17.b[7] \n"
                        "dup v11.8b, %18.b[7] \n"
                        "dup v12.8b, %19.b[7] \n"
                        "smlal v13.8h, v5.8b, v9.8b \n"
                        "smlal v14.8h, v5.8b, v10.8b \n"
                        "smlal v15.8h, v5.8b, v11.8b \n"
                        "smlal v16.8h, v5.8b, v12.8b \n"
                        "dup v9.8b, %16.b[8] \n"
                        "dup v10.8b, %17.b[8] \n"
                        "dup v11.8b, %18.b[8] \n"
                        "dup v12.8b, %19.b[8] \n"
                        "smlal v13.8h, v8.8b, v9.8b \n"
                        "smlal v14.8h, v8.8b, v10.8b \n"
                        "smlal v15.8h, v8.8b, v11.8b \n"
                        "smlal v16.8h, v8.8b, v12.8b \n"
                        // sum0 - sum3
                        "prfm pldl1keep, [%1, #128] \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "prfm pldl1keep, [%4, #128] \n"
                        "ld1 {v17.4s, v18.4s}, [%1] \n"
                        "ld1 {v19.4s, v20.4s}, [%2] \n"
                        "ld1 {v21.4s, v22.4s}, [%3] \n"
                        "ld1 {v23.4s, v24.4s}, [%4] \n"
                        "saddw v17.4s, v17.4s, v13.4h \n"
                        "saddw2 v18.4s, v18.4s, v13.8h \n"
                        "saddw v19.4s, v19.4s, v14.4h \n"
                        "saddw2 v20.4s, v20.4s, v14.8h \n"
                        "saddw v21.4s, v21.4s, v15.4h \n"
                        "saddw2 v22.4s, v22.4s, v15.8h \n"
                        "saddw v23.4s, v23.4s, v16.4h \n"
                        "saddw2 v24.4s, v24.4s, v16.8h \n"
                        "st1 {v17.4s, v18.4s}, [%1], #32\n"
                        "st1 {v19.4s, v20.4s}, [%2], #32\n"
                        "st1 {v21.4s, v22.4s}, [%3], #32\n"
                        "st1 {v23.4s, v24.4s}, [%4], #32\n"
                        "subs %w0, %w0, #1 \n"
                        "bne 0b \n"
                        : "=r"(nn), //%0
                        "=r"(outptr0), //%1
                        "=r"(outptr1), //%2
                        "=r"(outptr2), //%3
                        "=r"(outptr3), //%4
                        "=r"(r0), //%5
                        "=r"(r1), //%6
                        "=r"(r2) //%7
                        : "0"(nn),
                        "1"(outptr0),
                        "2"(outptr1),
                        "3"(outptr2),
                        "4"(outptr3),
                        "5"(r0),
                        "6"(r1),
                        "7"(r2),
                        "w"(_k0), //%16
                        "w"(_k1), //%17
                        "w"(_k2), //%18
                        "w"(_k3) //%19
                        : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24"
                    );
                }

                if (remain >= 4)
                {
                    // Same computation as the main loop but only the low 4
                    // results are stored; the input pointers are rewound by 8
                    // at the end so only 4 outputs' worth (stride 2) is consumed.
                    remain -= 4;

                    asm volatile(
                        // r0
                        "prfm pldl1keep, [%5, #128] \n"
                        "ld2 {v4.8b, v5.8b}, [%5], #16 \n"
                        "ld2 {v6.8b, v7.8b}, [%5] \n"
                        "ext v8.8b, v4.8b, v6.8b, #1 \n"
                        "dup v9.8b, %16.b[0] \n"
                        "dup v10.8b, %17.b[0] \n"
                        "dup v11.8b, %18.b[0] \n"
                        "dup v12.8b, %19.b[0] \n"
                        "smull v13.8h, v4.8b, v9.8b \n"
                        "smull v14.8h, v4.8b, v10.8b \n"
                        "smull v15.8h, v4.8b, v11.8b \n"
                        "smull v16.8h, v4.8b, v12.8b \n"
                        "dup v9.8b, %16.b[1] \n"
                        "dup v10.8b, %17.b[1] \n"
                        "dup v11.8b, %18.b[1] \n"
                        "dup v12.8b, %19.b[1] \n"
                        "smlal v13.8h, v5.8b, v9.8b \n"
                        "smlal v14.8h, v5.8b, v10.8b \n"
                        "smlal v15.8h, v5.8b, v11.8b \n"
                        "smlal v16.8h, v5.8b, v12.8b \n"
                        "dup v9.8b, %16.b[2] \n"
                        "dup v10.8b, %17.b[2] \n"
                        "dup v11.8b, %18.b[2] \n"
                        "dup v12.8b, %19.b[2] \n"
                        "smlal v13.8h, v8.8b, v9.8b \n"
                        "smlal v14.8h, v8.8b, v10.8b \n"
                        "smlal v15.8h, v8.8b, v11.8b \n"
                        "smlal v16.8h, v8.8b, v12.8b \n"
                        // r1
                        "prfm pldl1keep, [%6, #128] \n"
                        "ld2 {v4.8b, v5.8b}, [%6], #16 \n"
                        "ld2 {v6.8b, v7.8b}, [%6] \n"
                        "ext v8.8b, v4.8b, v6.8b, #1 \n"
                        "dup v9.8b, %16.b[3] \n"
                        "dup v10.8b, %17.b[3] \n"
                        "dup v11.8b, %18.b[3] \n"
                        "dup v12.8b, %19.b[3] \n"
                        "smlal v13.8h, v4.8b, v9.8b \n"
                        "smlal v14.8h, v4.8b, v10.8b \n"
                        "smlal v15.8h, v4.8b, v11.8b \n"
                        "smlal v16.8h, v4.8b, v12.8b \n"
                        "dup v9.8b, %16.b[4] \n"
                        "dup v10.8b, %17.b[4] \n"
                        "dup v11.8b, %18.b[4] \n"
                        "dup v12.8b, %19.b[4] \n"
                        "smlal v13.8h, v5.8b, v9.8b \n"
                        "smlal v14.8h, v5.8b, v10.8b \n"
                        "smlal v15.8h, v5.8b, v11.8b \n"
                        "smlal v16.8h, v5.8b, v12.8b \n"
                        "dup v9.8b, %16.b[5] \n"
                        "dup v10.8b, %17.b[5] \n"
                        "dup v11.8b, %18.b[5] \n"
                        "dup v12.8b, %19.b[5] \n"
                        "smlal v13.8h, v8.8b, v9.8b \n"
                        "smlal v14.8h, v8.8b, v10.8b \n"
                        "smlal v15.8h, v8.8b, v11.8b \n"
                        "smlal v16.8h, v8.8b, v12.8b \n"
                        // r2
                        "prfm pldl1keep, [%7, #128] \n"
                        "ld2 {v4.8b, v5.8b}, [%7], #16 \n"
                        "ld2 {v6.8b, v7.8b}, [%7] \n"
                        "ext v8.8b, v4.8b, v6.8b, #1 \n"
                        "dup v9.8b, %16.b[6] \n"
                        "dup v10.8b, %17.b[6] \n"
                        "dup v11.8b, %18.b[6] \n"
                        "dup v12.8b, %19.b[6] \n"
                        "smlal v13.8h, v4.8b, v9.8b \n"
                        "smlal v14.8h, v4.8b, v10.8b \n"
                        "smlal v15.8h, v4.8b, v11.8b \n"
                        "smlal v16.8h, v4.8b, v12.8b \n"
                        "dup v9.8b, %16.b[7] \n"
                        "dup v10.8b, %17.b[7] \n"
                        "dup v11.8b, %18.b[7] \n"
                        "dup v12.8b, %19.b[7] \n"
                        "smlal v13.8h, v5.8b, v9.8b \n"
                        "smlal v14.8h, v5.8b, v10.8b \n"
                        "smlal v15.8h, v5.8b, v11.8b \n"
                        "smlal v16.8h, v5.8b, v12.8b \n"
                        "dup v9.8b, %16.b[8] \n"
                        "dup v10.8b, %17.b[8] \n"
                        "dup v11.8b, %18.b[8] \n"
                        "dup v12.8b, %19.b[8] \n"
                        "smlal v13.8h, v8.8b, v9.8b \n"
                        "smlal v14.8h, v8.8b, v10.8b \n"
                        "smlal v15.8h, v8.8b, v11.8b \n"
                        "smlal v16.8h, v8.8b, v12.8b \n"
                        // sum0 - sum3
                        "prfm pldl1keep, [%1, #128] \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "prfm pldl1keep, [%4, #128] \n"
                        "ld1 {v17.4s}, [%1] \n"
                        "ld1 {v19.4s}, [%2] \n"
                        "ld1 {v21.4s}, [%3] \n"
                        "ld1 {v23.4s}, [%4] \n"
                        "saddw v17.4s, v17.4s, v13.4h \n"
                        "saddw v19.4s, v19.4s, v14.4h \n"
                        "saddw v21.4s, v21.4s, v15.4h \n"
                        "saddw v23.4s, v23.4s, v16.4h \n"
                        "st1 {v17.4s}, [%1], #16 \n"
                        "st1 {v19.4s}, [%2], #16 \n"
                        "st1 {v21.4s}, [%3], #16 \n"
                        "st1 {v23.4s}, [%4], #16 \n"
                        "sub %5, %5, #8 \n"
                        "sub %6, %6, #8 \n"
                        "sub %7, %7, #8 \n"
                        : "=r"(nn), //%0
                        "=r"(outptr0), //%1
                        "=r"(outptr1), //%2
                        "=r"(outptr2), //%3
                        "=r"(outptr3), //%4
                        "=r"(r0), //%5
                        "=r"(r1), //%6
                        "=r"(r2) //%7
                        : "0"(nn),
                        "1"(outptr0),
                        "2"(outptr1),
                        "3"(outptr2),
                        "4"(outptr3),
                        "5"(r0),
                        "6"(r1),
                        "7"(r2),
                        "w"(_k0), //%16
                        "w"(_k1), //%17
                        "w"(_k2), //%18
                        "w"(_k3) //%19
                        : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24"
                    );
                }

                // scalar tail: one output per iteration, four output channels
                for (; remain>0; remain--)
                {
                    int sum0 = 0;
                    int sum1 = 0;
                    int sum2 = 0;
                    int sum3 = 0;

                    sum0 += (int)r0[0] * kernel0[0];
                    sum0 += (int)r0[1] * kernel0[1];
                    sum0 += (int)r0[2] * kernel0[2];
                    sum0 += (int)r1[0] * kernel0[3];
                    sum0 += (int)r1[1] * kernel0[4];
                    sum0 += (int)r1[2] * kernel0[5];
                    sum0 += (int)r2[0] * kernel0[6];
                    sum0 += (int)r2[1] * kernel0[7];
                    sum0 += (int)r2[2] * kernel0[8];

                    sum1 += (int)r0[0] * kernel1[0];
                    sum1 += (int)r0[1] * kernel1[1];
                    sum1 += (int)r0[2] * kernel1[2];
                    sum1 += (int)r1[0] * kernel1[3];
                    sum1 += (int)r1[1] * kernel1[4];
                    sum1 += (int)r1[2] * kernel1[5];
                    sum1 += (int)r2[0] * kernel1[6];
                    sum1 += (int)r2[1] * kernel1[7];
                    sum1 += (int)r2[2] * kernel1[8];

                    sum2 += (int)r0[0] * kernel2[0];
                    sum2 += (int)r0[1] * kernel2[1];
                    sum2 += (int)r0[2] * kernel2[2];
                    sum2 += (int)r1[0] * kernel2[3];
                    sum2 += (int)r1[1] * kernel2[4];
                    sum2 += (int)r1[2] * kernel2[5];
                    sum2 += (int)r2[0] * kernel2[6];
                    sum2 += (int)r2[1] * kernel2[7];
                    sum2 += (int)r2[2] * kernel2[8];

                    sum3 += (int)r0[0] * kernel3[0];
                    sum3 += (int)r0[1] * kernel3[1];
                    sum3 += (int)r0[2] * kernel3[2];
                    sum3 += (int)r1[0] * kernel3[3];
                    sum3 += (int)r1[1] * kernel3[4];
                    sum3 += (int)r1[2] * kernel3[5];
                    sum3 += (int)r2[0] * kernel3[6];
                    sum3 += (int)r2[1] * kernel3[7];
                    sum3 += (int)r2[2] * kernel3[8];

                    *outptr0 += sum0;
                    *outptr1 += sum1;
                    *outptr2 += sum2;
                    *outptr3 += sum3;

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr0++;
                    outptr1++;
                    outptr2++;
                    outptr3++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }

            // advance to this output channel's weights for the next input channel
            kernel0 += 9;
            kernel1 += 9;
            kernel2 += 9;
            kernel3 += 9;
        }
    }

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=remain_outch_start; p<outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        // int32 plane: integer fill (was fill(0.f))
        out0.fill(0);

        const signed char* kernel0 = (const signed char*)kernel + p * inch * 9;

        for (int q=0; q<inch; q++)
        {
            int* outptr0 = out0;

            const signed char* img0 = bottom_blob.channel(q);

            const signed char* r0 = img0;
            const signed char* r1 = img0 + w;
            const signed char* r2 = img0 + w * 2;

            int i = 0;

            // FIX: broadcast from kernel0 (this output channel's weights for
            // input channel q), not from the base `kernel` pointer. The old
            // code read kernel[0..8], i.e. output channel 0 / input channel 0
            // weights, for every (p, q) pair in this path.
            int8x8_t _k0 = vdup_n_s8(kernel0[0]);
            int8x8_t _k1 = vdup_n_s8(kernel0[1]);
            int8x8_t _k2 = vdup_n_s8(kernel0[2]);
            int8x8_t _k3 = vdup_n_s8(kernel0[3]);
            int8x8_t _k4 = vdup_n_s8(kernel0[4]);
            int8x8_t _k5 = vdup_n_s8(kernel0[5]);
            int8x8_t _k6 = vdup_n_s8(kernel0[6]);
            int8x8_t _k7 = vdup_n_s8(kernel0[7]);
            int8x8_t _k8 = vdup_n_s8(kernel0[8]);

            for (; i < outh; i++)
            {
                int nn = outw >> 3;
                int remain = outw & 7;
                // FIX: removed `remain = outw;`. The vector loop below already
                // produces nn*8 outputs, so forcing remain back to outw made
                // the tail paths re-process the row and run past the end of
                // the output row.

                // 8 outputs per iteration; vld2 de-interleaves even/odd bytes
                // for the stride-2 access pattern
                for (; nn >0; nn--)
                {
                    int8x8x2_t _r0 = vld2_s8(r0);
                    int8x8x2_t _r0n = vld2_s8(r0+16);
                    int8x8_t _r00 = _r0.val[0];
                    int8x8_t _r01 = _r0.val[1];
                    int8x8_t _r02 = vext_s8(_r00, _r0n.val[0], 1);
                    int16x8_t _sum = vmull_s8(_r00, _k0);
                    _sum = vmlal_s8(_sum, _r01, _k1);
                    _sum = vmlal_s8(_sum, _r02, _k2);

                    int8x8x2_t _r1 = vld2_s8(r1);
                    int8x8x2_t _r1n = vld2_s8(r1+16);
                    int8x8_t _r10 = _r1.val[0];
                    int8x8_t _r11 = _r1.val[1];
                    int8x8_t _r12 = vext_s8(_r10, _r1n.val[0], 1);
                    _sum = vmlal_s8(_sum, _r10, _k3);
                    _sum = vmlal_s8(_sum, _r11, _k4);
                    _sum = vmlal_s8(_sum, _r12, _k5);

                    int8x8x2_t _r2 = vld2_s8(r2);
                    int8x8x2_t _r2n = vld2_s8(r2+16);
                    int8x8_t _r20 = _r2.val[0];
                    int8x8_t _r21 = _r2.val[1];
                    int8x8_t _r22 = vext_s8(_r20, _r2n.val[0], 1);
                    _sum = vmlal_s8(_sum, _r20, _k6);
                    _sum = vmlal_s8(_sum, _r21, _k7);
                    _sum = vmlal_s8(_sum, _r22, _k8);

                    int32x4_t sum0_s32 = vld1q_s32(outptr0);
                    int32x4_t sum0n_s32 = vld1q_s32(outptr0+4);

                    sum0_s32 = vaddw_s16(sum0_s32, vget_low_s16(_sum));
                    sum0n_s32 = vaddw_s16(sum0n_s32, vget_high_s16(_sum));

                    vst1q_s32(outptr0, sum0_s32);
                    vst1q_s32(outptr0+4, sum0n_s32);

                    r0 += 16;
                    r1 += 16;
                    r2 += 16;
                    outptr0 += 8;
                }

                if (remain >= 4)
                {
                    // same as above but only the low 4 results are stored;
                    // pointers advance by 8 input bytes = 4 stride-2 outputs
                    remain -= 4;

                    int8x8x2_t _r0 = vld2_s8(r0);
                    int8x8x2_t _r0n = vld2_s8(r0+16);
                    int8x8_t _r00 = _r0.val[0];
                    int8x8_t _r01 = _r0.val[1];
                    int8x8_t _r02 = vext_s8(_r00, _r0n.val[0], 1);
                    int16x8_t _sum = vmull_s8(_r00, _k0);
                    _sum = vmlal_s8(_sum, _r01, _k1);
                    _sum = vmlal_s8(_sum, _r02, _k2);

                    int8x8x2_t _r1 = vld2_s8(r1);
                    int8x8x2_t _r1n = vld2_s8(r1+16);
                    int8x8_t _r10 = _r1.val[0];
                    int8x8_t _r11 = _r1.val[1];
                    int8x8_t _r12 = vext_s8(_r10, _r1n.val[0], 1);
                    _sum = vmlal_s8(_sum, _r10, _k3);
                    _sum = vmlal_s8(_sum, _r11, _k4);
                    _sum = vmlal_s8(_sum, _r12, _k5);

                    int8x8x2_t _r2 = vld2_s8(r2);
                    int8x8x2_t _r2n = vld2_s8(r2+16);
                    int8x8_t _r20 = _r2.val[0];
                    int8x8_t _r21 = _r2.val[1];
                    int8x8_t _r22 = vext_s8(_r20, _r2n.val[0], 1);
                    _sum = vmlal_s8(_sum, _r20, _k6);
                    _sum = vmlal_s8(_sum, _r21, _k7);
                    _sum = vmlal_s8(_sum, _r22, _k8);

                    int32x4_t sum0_s32 = vld1q_s32(outptr0);
                    sum0_s32 = vaddw_s16(sum0_s32, vget_low_s16(_sum));
                    vst1q_s32(outptr0, sum0_s32);

                    r0 += 8;
                    r1 += 8;
                    r2 += 8;
                    outptr0 += 4;
                }

                // scalar tail
                for (; remain>0; remain--)
                {
                    int sum0 = 0;
                    sum0 += (int)r0[0] * kernel0[0];
                    sum0 += (int)r0[1] * kernel0[1];
                    sum0 += (int)r0[2] * kernel0[2];
                    sum0 += (int)r1[0] * kernel0[3];
                    sum0 += (int)r1[1] * kernel0[4];
                    sum0 += (int)r1[2] * kernel0[5];
                    sum0 += (int)r2[0] * kernel0[6];
                    sum0 += (int)r2[1] * kernel0[7];
                    sum0 += (int)r2[2] * kernel0[8];

                    *outptr0 += sum0;

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr0++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }

            kernel0 += 9;
        }
    }
}
#else // __aarch64__
static void conv3x3s1_neon_s8(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
    // Stride-1 3x3 int8 convolution, armv7 NEON path.
    //
    // bottom_blob : int8 input, one w x h plane per input channel
    // top_blob    : int32 accumulator output, one outw x outh plane per output
    //               channel; zero-filled here, then accumulated (+=) over all
    //               input channels
    // _kernel     : int8 weights, 9 values per (out-channel, in-channel) pair,
    //               laid out [outch][inch][9] contiguously
    //
    // Output channels are processed two at a time, and output rows two at a
    // time (rows share two of the three input rows). Leftover channels / rows
    // fall through to the single-channel / single-row paths below.
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char* kernel = _kernel;

    int nn_outch = outch >> 1;
    int remain_outch_start = nn_outch << 1;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp=0; pp < nn_outch; pp++)
    {
        int p = pp * 2;

        Mat out0 = top_blob.channel(p);
        Mat out1 = top_blob.channel(p+1);

        out0.fill(0);
        out1.fill(0);

        const signed char* kernel0 = (const signed char *)kernel + p * inch * 9;
        const signed char* kernel1 = (const signed char *)kernel + (p + 1) * inch * 9;

        for (int q=0; q<inch; q++)
        {
            int* outptr0 = out0;
            int* outptr1 = out1;
            // second output row for the two-rows-at-a-time scheme
            int* outptr0n = outptr0 + outw;
            int* outptr1n = outptr1 + outw;

            const signed char* img0 = bottom_blob.channel(q);

            // four consecutive input rows feed two output rows
            const signed char* r0 = img0;
            const signed char* r1 = img0 + w;
            const signed char* r2 = img0 + w * 2;
            const signed char* r3 = img0 + w * 3;

            // NOTE(review): k00..k16 are unused in this path (the asm reads
            // the weights directly); presumably leftovers from an earlier
            // scalar implementation.
            const signed char* k00 = kernel0;
            const signed char* k03 = kernel0 + 3;
            const signed char* k06 = kernel0 + 6;
            const signed char* k10 = kernel1;
            const signed char* k13 = kernel1 + 3;
            const signed char* k16 = kernel1 + 6;

            int i = 0;

            for (; i+1 < outh; i+=2)
            {
                int nn = outw >> 3;
                int remain = outw & 7;

                if (nn > 0)
                {
                    // Preload both 9-tap kernels into d26-d29 (q13/q14).
                    // NOTE(review): this relies on d26-d29 surviving between
                    // this asm block and the next one -- q13/q14 are
                    // deliberately absent from the next block's clobber list.
                    // Fragile, but intentional in this code.
                    asm volatile(
                        "vld1.8 {d26-d27}, [%0] \n"
                        "vld1.8 {d28-d29}, [%1] \n"
                        : "=r"(kernel0), // %0
                        "=r"(kernel1) // %1
                        : "0"(kernel0),
                        "1"(kernel1)
                        : "cc", "memory"
                    );

                    // 8 outputs per iteration for 2 output channels x 2 rows.
                    // vmull/vmlal widen s8*s8 -> s16; vaddw.s16 accumulates
                    // into the int32 output planes.
                    asm volatile(
                        "0: \n"
                        "pld [%5, #128] \n"
                        "vld1.32 {d0-d1}, [%5] \n"// r0
                        "add %5, #8 \n"
                        "vext.8 d2, d0, d1, #1 \n"
                        "vext.8 d3, d0, d1, #2 \n"
                        "vdup.s8 d1, d26[0] \n"
                        "vdup.s8 d30, d26[1] \n"
                        "vdup.s8 d31, d26[2] \n"
                        "vmull.s8 q2, d0, d1 \n"// k0
                        "vmlal.s8 q2, d2, d30 \n"// k1
                        "vmlal.s8 q2, d3, d31 \n"// k2
                        "pld [%6, #128] \n"
                        "vld1.32 {d6-d7}, [%6] \n"// r1
                        "add %6, #8 \n"
                        "vext.8 d8, d6, d7, #1 \n"
                        "vext.8 d9, d6, d7, #2 \n"
                        "vdup.s8 d1, d26[3] \n"
                        "vdup.s8 d30, d26[4] \n"
                        "vdup.s8 d31, d26[5] \n"
                        "vmlal.s8 q2, d6, d1 \n"// k3
                        "vmlal.s8 q2, d8, d30 \n"// k4
                        "vmlal.s8 q2, d9, d31 \n"// k5
                        "pld [%7, #128] \n"
                        "vld1.32 {d10-d11}, [%7] \n"// r2
                        "add %7, #8 \n"
                        "vext.8 d12, d10, d11, #1 \n"
                        "vext.8 d13, d10, d11, #2 \n"
                        "vdup.s8 d1, d26[6] \n"
                        "vdup.s8 d30, d26[7] \n"
                        "vdup.s8 d31, d27[0] \n"
                        "vmlal.s8 q2, d10, d1 \n"// k6
                        "vmlal.s8 q2, d12, d30 \n"// k7
                        "vmlal.s8 q2, d13, d31 \n"// k8
                        "pld [%8, #128] \n"
                        "vld1.32 {d14-d15}, [%8] \n"// r3
                        "add %8, #8 \n"
                        "vext.8 d16, d14, d15, #1 \n"
                        "vext.8 d17, d14, d15, #2 \n"
                        "pld [%1, #128] \n"
                        "vld1.32 {d18-d21}, [%1] \n"// sum0
                        "vaddw.s16 q9, q9, d4 \n"
                        "vaddw.s16 q10, q10, d5 \n"
                        "vst1.32 {d18-d21}, [%1]! \n"
                        "vdup.s8 d1, d26[0] \n"
                        "vdup.s8 d30, d26[1] \n"
                        "vdup.s8 d31, d26[2] \n"
                        "vmull.s8 q2, d6, d1 \n"// k0
                        "vmlal.s8 q2, d8, d30 \n"// k1
                        "vmlal.s8 q2, d9, d31 \n"// k2
                        "vdup.s8 d1, d26[3] \n"
                        "vdup.s8 d30, d26[4] \n"
                        "vdup.s8 d31, d26[5] \n"
                        "vmlal.s8 q2, d10, d1 \n"// k3
                        "vmlal.s8 q2, d12, d30 \n"// k4
                        "vmlal.s8 q2, d13, d31 \n"// k5
                        "vdup.s8 d1, d26[6] \n"
                        "vdup.s8 d30, d26[7] \n"
                        "vdup.s8 d31, d27[0] \n"
                        "vmlal.s8 q2, d14, d1 \n"// k6
                        "vmlal.s8 q2, d16, d30 \n"// k7
                        "vmlal.s8 q2, d17, d31 \n"// k8
                        "pld [%2, #128] \n"
                        "vld1.32 {d18-d21}, [%2] \n"// sum0n
                        "vaddw.s16 q9, q9, d4 \n"
                        "vaddw.s16 q10, q10, d5 \n"
                        "vst1.32 {d18-d21}, [%2]! \n"
                        "vdup.s8 d1, d28[0] \n"
                        "vdup.s8 d30, d28[1] \n"
                        "vdup.s8 d31, d28[2] \n"
                        "vmull.s8 q2, d0, d1 \n"// k0n
                        "vmlal.s8 q2, d2, d30 \n"// k1n
                        "vmlal.s8 q2, d3, d31 \n"// k2n
                        "vdup.s8 d1, d28[3] \n"
                        "vdup.s8 d30, d28[4] \n"
                        "vdup.s8 d31, d28[5] \n"
                        "vmlal.s8 q2, d6, d1 \n"// k3n
                        "vmlal.s8 q2, d8, d30 \n"// k4n
                        "vmlal.s8 q2, d9, d31 \n"// k5n
                        "vdup.s8 d1, d28[6] \n"
                        "vdup.s8 d30, d28[7] \n"
                        "vdup.s8 d31, d29[0] \n"
                        "vmlal.s8 q2, d10, d1 \n"// k6n
                        "vmlal.s8 q2, d12, d30 \n"// k7n
                        "vmlal.s8 q2, d13, d31 \n"// k8n
                        "pld [%3, #128] \n"
                        "vld1.32 {d18-d21}, [%3] \n"// sum1
                        "vaddw.s16 q9, q9, d4 \n"
                        "vaddw.s16 q10, q10, d5 \n"
                        "vst1.32 {d18-d21}, [%3]! \n"
                        "vdup.s8 d1, d28[0] \n"
                        "vdup.s8 d30, d28[1] \n"
                        "vdup.s8 d31, d28[2] \n"
                        "vmull.s8 q2, d6, d1 \n"// k0n
                        "vmlal.s8 q2, d8, d30 \n"// k1n
                        "vmlal.s8 q2, d9, d31 \n"// k2n
                        "vdup.s8 d1, d28[3] \n"
                        "vdup.s8 d30, d28[4] \n"
                        "vdup.s8 d31, d28[5] \n"
                        "vmlal.s8 q2, d10, d1 \n"// k3n
                        "vmlal.s8 q2, d12, d30 \n"// k4n
                        "vmlal.s8 q2, d13, d31 \n"// k5n
                        "vdup.s8 d1, d28[6] \n"
                        "vdup.s8 d30, d28[7] \n"
                        "vdup.s8 d31, d29[0] \n"
                        "vmlal.s8 q2, d14, d1 \n"// k6n
                        "vmlal.s8 q2, d16, d30 \n"// k7n
                        "vmlal.s8 q2, d17, d31 \n"// k8n
                        "pld [%4, #128] \n"
                        "vld1.32 {d18-d21}, [%4] \n"// sum1n
                        "vaddw.s16 q9, q9, d4 \n"
                        "vaddw.s16 q10, q10, d5 \n"
                        "vst1.32 {d18-d21}, [%4]! \n"
                        "subs %0, #1 \n"
                        "bne 0b \n"
                        : "=r"(nn), // %0
                        "=r"(outptr0), // %1
                        "=r"(outptr0n), // %2
                        "=r"(outptr1), // %3
                        "=r"(outptr1n), // %4
                        "=r"(r0), // %5
                        "=r"(r1), // %6
                        "=r"(r2), // %7
                        "=r"(r3) // %8
                        : "0"(nn),
                        "1"(outptr0),
                        "2"(outptr0n),
                        "3"(outptr1),
                        "4"(outptr1n),
                        "5"(r0),
                        "6"(r1),
                        "7"(r2),
                        "8"(r3)
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q15"
                    );
                }

                // scalar tail: one output column at a time, both channels and
                // both rows
                for (; remain>0; remain--)
                {
                    int sum0 = 0;
                    int sum0n = 0;
                    int sum1 = 0;
                    int sum1n = 0;

                    //ToDo Neon
                    sum0 += (int)r0[0] * kernel0[0];
                    sum0 += (int)r0[1] * kernel0[1];
                    sum0 += (int)r0[2] * kernel0[2];
                    sum0 += (int)r1[0] * kernel0[3];
                    sum0 += (int)r1[1] * kernel0[4];
                    sum0 += (int)r1[2] * kernel0[5];
                    sum0 += (int)r2[0] * kernel0[6];
                    sum0 += (int)r2[1] * kernel0[7];
                    sum0 += (int)r2[2] * kernel0[8];

                    sum1 += (int)r0[0] * kernel1[0];
                    sum1 += (int)r0[1] * kernel1[1];
                    sum1 += (int)r0[2] * kernel1[2];
                    sum1 += (int)r1[0] * kernel1[3];
                    sum1 += (int)r1[1] * kernel1[4];
                    sum1 += (int)r1[2] * kernel1[5];
                    sum1 += (int)r2[0] * kernel1[6];
                    sum1 += (int)r2[1] * kernel1[7];
                    sum1 += (int)r2[2] * kernel1[8];

                    // second output row uses rows r1..r3
                    sum0n += (int)r1[0] * kernel0[0];
                    sum0n += (int)r1[1] * kernel0[1];
                    sum0n += (int)r1[2] * kernel0[2];
                    sum0n += (int)r2[0] * kernel0[3];
                    sum0n += (int)r2[1] * kernel0[4];
                    sum0n += (int)r2[2] * kernel0[5];
                    sum0n += (int)r3[0] * kernel0[6];
                    sum0n += (int)r3[1] * kernel0[7];
                    sum0n += (int)r3[2] * kernel0[8];

                    sum1n += (int)r1[0] * kernel1[0];
                    sum1n += (int)r1[1] * kernel1[1];
                    sum1n += (int)r1[2] * kernel1[2];
                    sum1n += (int)r2[0] * kernel1[3];
                    sum1n += (int)r2[1] * kernel1[4];
                    sum1n += (int)r2[2] * kernel1[5];
                    sum1n += (int)r3[0] * kernel1[6];
                    sum1n += (int)r3[1] * kernel1[7];
                    sum1n += (int)r3[2] * kernel1[8];

                    *outptr0 += sum0;
                    *outptr1 += sum1;
                    *outptr0n += sum0n;
                    *outptr1n += sum1n;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    outptr0++;
                    outptr1++;
                    outptr0n++;
                    outptr1n++;
                }

                // skip the 3x3 overhang (+2) plus one extra input row, since
                // two output rows were produced
                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;

                // skip the second output row that was just written
                outptr0 += outw;
                outptr1 += outw;
                outptr0n += outw;
                outptr1n += outw;
            }

            // leftover single output row
            for (; i < outh; i++)
            {
                int nn = outw >> 3;
                int remain = outw & 7;

                if (nn > 0)
                {
                    // same d26-d29 preload / register-persistence scheme as above
                    asm volatile(
                        "vld1.8 {d26-d27}, [%0] \n"
                        "vld1.8 {d28-d29}, [%1] \n"
                        : "=r"(kernel0), // %0
                        "=r"(kernel1) // %1
                        : "0"(kernel0),
                        "1"(kernel1)
                        : "cc", "memory"
                    );

                    asm volatile(
                        "0: \n"
                        "pld [%3, #128] \n"
                        "vld1.32 {d0-d1}, [%3] \n"// r0
                        "add %3, #8 \n"
                        "vext.8 d2, d0, d1, #1 \n"
                        "vext.8 d3, d0, d1, #2 \n"
                        "vdup.s8 d1, d26[0] \n"
                        "vdup.s8 d30, d26[1] \n"
                        "vdup.s8 d31, d26[2] \n"
                        "vmull.s8 q2, d0, d1 \n"// k0
                        "vmlal.s8 q2, d2, d30 \n"// k1
                        "vmlal.s8 q2, d3, d31 \n"// k2
                        "pld [%4, #128] \n"
                        "vld1.32 {d6-d7}, [%4] \n"// r1
                        "add %4, #8 \n"
                        "vext.8 d8, d6, d7, #1 \n"
                        "vext.8 d9, d6, d7, #2 \n"
                        "vdup.s8 d1, d26[3] \n"
                        "vdup.s8 d30, d26[4] \n"
                        "vdup.s8 d31, d26[5] \n"
                        "vmlal.s8 q2, d6, d1 \n"// k3
                        "vmlal.s8 q2, d8, d30 \n"// k4
                        "vmlal.s8 q2, d9, d31 \n"// k5
                        "pld [%5, #128] \n"
                        "vld1.32 {d10-d11}, [%5] \n"// r2
                        "add %5, #8 \n"
                        "vext.8 d12, d10, d11, #1 \n"
                        "vext.8 d13, d10, d11, #2 \n"
                        "vdup.s8 d1, d26[6] \n"
                        "vdup.s8 d30, d26[7] \n"
                        "vdup.s8 d31, d27[0] \n"
                        "vmlal.s8 q2, d10, d1 \n"// k6
                        "vmlal.s8 q2, d12, d30 \n"// k7
                        "vmlal.s8 q2, d13, d31 \n"// k8
                        "pld [%1, #128] \n"
                        "vld1.32 {d18-d21}, [%1] \n"// sum0
                        "vaddw.s16 q9, q9, d4 \n"
                        "vaddw.s16 q10, q10, d5 \n"
                        "vst1.32 {d18-d21}, [%1]! \n"
                        "vdup.s8 d1, d28[0] \n"
                        "vdup.s8 d7, d28[1] \n"
                        "vdup.s8 d11, d28[2] \n"
                        "vmull.s8 q2, d0, d1 \n"// k0n
                        "vmlal.s8 q2, d2, d7 \n"// k1n
                        "vmlal.s8 q2, d3, d11 \n"// k2n
                        "vdup.s8 d1, d28[3] \n"
                        "vdup.s8 d7, d28[4] \n"
                        "vdup.s8 d11, d28[5] \n"
                        "vmlal.s8 q2, d6, d1 \n"// k3n
                        "vmlal.s8 q2, d8, d7 \n"// k4n
                        "vmlal.s8 q2, d9, d11 \n"// k5n
                        "vdup.s8 d1, d28[6] \n"
                        "vdup.s8 d7, d28[7] \n"
                        "vdup.s8 d11, d29[0] \n"
                        "vmlal.s8 q2, d10, d1 \n"// k6n
                        "vmlal.s8 q2, d12, d7 \n"// k7n
                        "vmlal.s8 q2, d13, d11 \n"// k8n
                        "pld [%2, #128] \n"
                        "vld1.32 {d18-d21}, [%2] \n"// sum1
                        "vaddw.s16 q9, q9, d4 \n"
                        "vaddw.s16 q10, q10, d5 \n"
                        "vst1.32 {d18-d21}, [%2]! \n"
                        "subs %0, #1 \n"
                        "bne 0b \n"
                        : "=r"(nn), // %0
                        "=r"(outptr0), // %1
                        "=r"(outptr1), // %2
                        "=r"(r0), // %3
                        "=r"(r1), // %4
                        "=r"(r2) // %5
                        : "0"(nn),
                        "1"(outptr0),
                        "2"(outptr1),
                        "3"(r0),
                        "4"(r1),
                        "5"(r2)
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
                }

                for (; remain>0; remain--)
                {
                    int sum0 = 0;
                    int sum1 = 0;

                    sum0 += (int)r0[0] * kernel0[0];
                    sum0 += (int)r0[1] * kernel0[1];
                    sum0 += (int)r0[2] * kernel0[2];
                    sum0 += (int)r1[0] * kernel0[3];
                    sum0 += (int)r1[1] * kernel0[4];
                    sum0 += (int)r1[2] * kernel0[5];
                    sum0 += (int)r2[0] * kernel0[6];
                    sum0 += (int)r2[1] * kernel0[7];
                    sum0 += (int)r2[2] * kernel0[8];

                    sum1 += (int)r0[0] * kernel1[0];
                    sum1 += (int)r0[1] * kernel1[1];
                    sum1 += (int)r0[2] * kernel1[2];
                    sum1 += (int)r1[0] * kernel1[3];
                    sum1 += (int)r1[1] * kernel1[4];
                    sum1 += (int)r1[2] * kernel1[5];
                    sum1 += (int)r2[0] * kernel1[6];
                    sum1 += (int)r2[1] * kernel1[7];
                    sum1 += (int)r2[2] * kernel1[8];

                    *outptr0 += sum0;
                    *outptr1 += sum1;

                    r0++;
                    r1++;
                    r2++;
                    outptr0++;
                    outptr1++;
                }

                // 3x3 horizontal overhang only (single output row)
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }

            // next input channel's weights for these two output channels
            kernel0 += 9;
            kernel1 += 9;
        }
    }

    // leftover output channels, one at a time
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=remain_outch_start; p<outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        out0.fill(0);

        const signed char* kernel0 = (const signed char *)kernel + p * inch * 9;

        for (int q=0; q<inch; q++)
        {
            int* outptr0 = out0;
            int* outptr0n = outptr0 + outw;

            const signed char* img0 = bottom_blob.channel(q);

            const signed char* r0 = img0;
            const signed char* r1 = img0 + w;
            const signed char* r2 = img0 + w * 2;
            const signed char* r3 = img0 + w * 3;

            // unused in this path (asm reads the weights directly)
            const signed char* k00 = kernel0;
            const signed char* k03 = kernel0 + 3;
            const signed char* k06 = kernel0 + 6;

            int i = 0;

            // two output rows per iteration
            for (; i+1 < outh; i+=2)
            {
                int nn = outw >> 3;
                int remain = outw & 7;

                if (nn > 0)
                {
                    // preload the 9-tap kernel into d26-d27 (same
                    // cross-asm-block register persistence as above)
                    asm volatile(
                        "vld1.8 {d26-d27}, [%0] \n"
                        : "=r"(kernel0) // %0
                        : "0"(kernel0)
                        : "cc", "memory"
                    );

                    asm volatile(
                        "0: \n"
                        "pld [%3, #128] \n"
                        "vld1.32 {d0-d1}, [%3] \n"// r0
                        "add %3, #8 \n"
                        "vext.8 d2, d0, d1, #1 \n"
                        "vext.8 d3, d0, d1, #2 \n"
                        "vdup.s8 d1, d26[0] \n"
                        "vdup.s8 d30, d26[1] \n"
                        "vdup.s8 d31, d26[2] \n"
                        "vmull.s8 q2, d0, d1 \n"// k0
                        "vmlal.s8 q2, d2, d30 \n"// k1
                        "vmlal.s8 q2, d3, d31 \n"// k2
                        "pld [%4, #128] \n"
                        "vld1.32 {d6-d7}, [%4] \n"// r1
                        "add %4, #8 \n"
                        "vext.8 d8, d6, d7, #1 \n"
                        "vext.8 d9, d6, d7, #2 \n"
                        "vdup.s8 d1, d26[3] \n"
                        "vdup.s8 d30, d26[4] \n"
                        "vdup.s8 d31, d26[5] \n"
                        "vmlal.s8 q2, d6, d1 \n"// k3
                        "vmlal.s8 q2, d8, d30 \n"// k4
                        "vmlal.s8 q2, d9, d31 \n"// k5
                        "pld [%5, #128] \n"
                        "vld1.32 {d10-d11}, [%5] \n"// r2
                        "add %5, #8 \n"
                        "vext.8 d12, d10, d11, #1 \n"
                        "vext.8 d13, d10, d11, #2 \n"
                        "vdup.s8 d1, d26[6] \n"
                        "vdup.s8 d30, d26[7] \n"
                        "vdup.s8 d31, d27[0] \n"
                        "vmlal.s8 q2, d10, d1 \n"// k6
                        "vmlal.s8 q2, d12, d30 \n"// k7
                        "vmlal.s8 q2, d13, d31 \n"// k8
                        "pld [%6, #128] \n"
                        "vld1.32 {d14-d15}, [%6] \n"// r3
                        "add %6, #8 \n"
                        "vext.8 d16, d14, d15, #1 \n"
                        "vext.8 d17, d14, d15, #2 \n"
                        "pld [%1, #128] \n"
                        "vld1.32 {d18-d21}, [%1] \n"// sum0
                        "vaddw.s16 q9, q9, d4 \n"
                        "vaddw.s16 q10, q10, d5 \n"
                        "vst1.32 {d18-d21}, [%1]! \n"
                        "vdup.s8 d1, d26[0] \n"
                        "vdup.s8 d30, d26[1] \n"
                        "vdup.s8 d31, d26[2] \n"
                        "vmull.s8 q2, d6, d1 \n"// k0
                        "vmlal.s8 q2, d8, d30 \n"// k1
                        "vmlal.s8 q2, d9, d31 \n"// k2
                        "vdup.s8 d1, d26[3] \n"
                        "vdup.s8 d30, d26[4] \n"
                        "vdup.s8 d31, d26[5] \n"
                        "vmlal.s8 q2, d10, d1 \n"// k3
                        "vmlal.s8 q2, d12, d30 \n"// k4
                        "vmlal.s8 q2, d13, d31 \n"// k5
                        "vdup.s8 d1, d26[6] \n"
                        "vdup.s8 d30, d26[7] \n"
                        "vdup.s8 d31, d27[0] \n"
                        "vmlal.s8 q2, d14, d1 \n"// k6
                        "vmlal.s8 q2, d16, d30 \n"// k7
                        "vmlal.s8 q2, d17, d31 \n"// k8
                        "pld [%2, #128] \n"
                        "vld1.32 {d18-d21}, [%2] \n"// sum0n
                        "vaddw.s16 q9, q9, d4 \n"
                        "vaddw.s16 q10, q10, d5 \n"
                        "vst1.32 {d18-d21}, [%2]! \n"
                        "subs %0, #1 \n"
                        "bne 0b \n"
                        : "=r"(nn), // %0
                        "=r"(outptr0), // %1
                        "=r"(outptr0n), // %2
                        "=r"(r0), // %3
                        "=r"(r1), // %4
                        "=r"(r2), // %5
                        "=r"(r3) // %6
                        : "0"(nn),
                        "1"(outptr0),
                        "2"(outptr0n),
                        "3"(r0),
                        "4"(r1),
                        "5"(r2),
                        "6"(r3)
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
                }

                for (; remain>0; remain--)
                {
                    //Todo Neon
                    int sum0 = 0;
                    int sum0n = 0;

                    sum0 += (int)r0[0] * kernel0[0];
                    sum0 += (int)r0[1] * kernel0[1];
                    sum0 += (int)r0[2] * kernel0[2];
                    sum0 += (int)r1[0] * kernel0[3];
                    sum0 += (int)r1[1] * kernel0[4];
                    sum0 += (int)r1[2] * kernel0[5];
                    sum0 += (int)r2[0] * kernel0[6];
                    sum0 += (int)r2[1] * kernel0[7];
                    sum0 += (int)r2[2] * kernel0[8];

                    // second output row uses rows r1..r3
                    sum0n += (int)r1[0] * kernel0[0];
                    sum0n += (int)r1[1] * kernel0[1];
                    sum0n += (int)r1[2] * kernel0[2];
                    sum0n += (int)r2[0] * kernel0[3];
                    sum0n += (int)r2[1] * kernel0[4];
                    sum0n += (int)r2[2] * kernel0[5];
                    sum0n += (int)r3[0] * kernel0[6];
                    sum0n += (int)r3[1] * kernel0[7];
                    sum0n += (int)r3[2] * kernel0[8];

                    *outptr0 += sum0;
                    *outptr0n += sum0n;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    outptr0++;
                    outptr0n++;
                }

                // overhang (+2) plus one extra row: two output rows consumed
                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;

                outptr0 += outw;
                outptr0n += outw;
            }

            // leftover single output row
            for (; i < outh; i++)
            {
                int nn = outw >> 3;
                int remain = outw & 7;

                if (nn > 0)
                {
                    asm volatile(
                        "vld1.8 {d26-d27}, [%0] \n"
                        : "=r"(kernel0) // %0
                        : "0"(kernel0)
                        : "cc", "memory"
                    );

                    asm volatile(
                        "0: \n"
                        "pld [%2, #128] \n"
                        "vld1.32 {d0-d1}, [%2] \n"// r0
                        "add %2, #8 \n"
                        "vext.8 d2, d0, d1, #1 \n"
                        "vext.8 d3, d0, d1, #2 \n"
                        "vdup.s8 d1, d26[0] \n"
                        "vdup.s8 d30, d26[1] \n"
                        "vdup.s8 d31, d26[2] \n"
                        "vmull.s8 q2, d0, d1 \n"// k0
                        "vmlal.s8 q2, d2, d30 \n"// k1
                        "vmlal.s8 q2, d3, d31 \n"// k2
                        "pld [%3, #128] \n"
                        "vld1.32 {d6-d7}, [%3] \n"// r1
                        "add %3, #8 \n"
                        "vext.8 d8, d6, d7, #1 \n"
                        "vext.8 d9, d6, d7, #2 \n"
                        "vdup.s8 d1, d26[3] \n"
                        "vdup.s8 d30, d26[4] \n"
                        "vdup.s8 d31, d26[5] \n"
                        "vmlal.s8 q2, d6, d1 \n"// k3
                        "vmlal.s8 q2, d8, d30 \n"// k4
                        "vmlal.s8 q2, d9, d31 \n"// k5
                        "pld [%4, #128] \n"
                        "vld1.32 {d10-d11}, [%4] \n"// r2
                        "add %4, #8 \n"
                        "vext.8 d12, d10, d11, #1 \n"
                        "vext.8 d13, d10, d11, #2 \n"
                        "vdup.s8 d1, d26[6] \n"
                        "vdup.s8 d30, d26[7] \n"
                        "vdup.s8 d31, d27[0] \n"
                        "vmlal.s8 q2, d10, d1 \n"// k6
                        "vmlal.s8 q2, d12, d30 \n"// k7
                        "vmlal.s8 q2, d13, d31 \n"// k8
                        "pld [%1, #128] \n"
                        "vld1.32 {d18-d21}, [%1] \n"// sum0
                        "vaddw.s16 q9, q9, d4 \n"
                        "vaddw.s16 q10, q10, d5 \n"
                        "vst1.32 {d18-d21}, [%1]! \n"
                        "subs %0, #1 \n"
                        "bne 0b \n"
                        : "=r"(nn), // %0
                        "=r"(outptr0), // %1
                        "=r"(r0), // %2
                        "=r"(r1), // %3
                        "=r"(r2) // %4
                        : "0"(nn),
                        "1"(outptr0),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2)
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
                }

                for (; remain>0; remain--)
                {
                    int sum0 = 0;

                    sum0 += (int)r0[0] * kernel0[0];
                    sum0 += (int)r0[1] * kernel0[1];
                    sum0 += (int)r0[2] * kernel0[2];
                    sum0 += (int)r1[0] * kernel0[3];
                    sum0 += (int)r1[1] * kernel0[4];
                    sum0 += (int)r1[2] * kernel0[5];
                    sum0 += (int)r2[0] * kernel0[6];
                    sum0 += (int)r2[1] * kernel0[7];
                    sum0 += (int)r2[2] * kernel0[8];

                    *outptr0 += sum0;

                    r0++;
                    r1++;
                    r2++;
                    outptr0++;
                }

                r0 += 2;
                r1 += 2;
                r2 += 2;
            }

            kernel0 += 9;
        }
    }
}
// 3x3, stride-1, int8 convolution (NEON, ARMv7 inline asm), "left4" variant.
// Accumulates signed-char input x signed-char kernel products into the int32
// top_blob. Output channels are processed in pairs (out0/out1) with a scalar
// remainder channel loop; output rows are processed in pairs (outptr0 /
// outptr0n) with a single-row remainder loop. The main asm loop produces
// 8 output columns per iteration; a trailing asm block then handles a fixed
// tail ("add %N, #4" on the row pointers) — presumably the 4-column remainder
// this "left4" variant is named for; TODO confirm against the caller's
// dispatch on outw % 8.
static void conv3x3s1_neon_s8_left4(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    // Kernel weights are stored as signed char despite the float* type; every
    // use below goes through a (const signed char*) cast.
    const float* kernel = _kernel;
    // Two output channels per iteration of the parallel loop; leftover
    // channels are handled by the second loop starting at remain_outch_start.
    int nn_outch = outch >> 1;
    int remain_outch_start = nn_outch << 1;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp=0; pp < nn_outch; pp++)
    {
        int p = pp * 2;
        Mat out0 = top_blob.channel(p);
        Mat out1 = top_blob.channel(p+1);
        // Accumulators start at zero; contributions from every input channel
        // are summed in below (read-modify-write of the int32 sums in asm).
        out0.fill(0);
        out1.fill(0);
        // 9 weights (3x3) per (output, input) channel pair.
        const signed char* kernel0 = (const signed char *)kernel + p * inch * 9;
        const signed char* kernel1 = (const signed char *)kernel + (p + 1) * inch * 9;
        for (int q=0; q<inch; q++)
        {
            int* outptr0 = out0;
            int* outptr1 = out1;
            // Second-row output pointers for the two-rows-at-a-time path.
            int* outptr0n = outptr0 + outw;
            int* outptr1n = outptr1 + outw;
            const signed char* img0 = bottom_blob.channel(q);
            // Four consecutive input rows feed two output rows (3-tap window).
            const signed char* r0 = img0;
            const signed char* r1 = img0 + w;
            const signed char* r2 = img0 + w*2;
            const signed char* r3 = img0 + w*3;
            // NOTE(review): k00/k03/k06/k10/k13/k16 are set up but never
            // referenced in this function body — the asm reads the kernels
            // directly via kernel0/kernel1 instead.
            const signed char* k00 = kernel0;
            const signed char* k03 = kernel0 + 3;
            const signed char* k06 = kernel0 + 6;
            const signed char* k10 = kernel1;
            const signed char* k13 = kernel1 + 3;
            const signed char* k16 = kernel1 + 6;
            int i = 0;
            // Two output rows per iteration.
            for (; i+1 < outh; i+=2)
            {
                int nn = outw >> 3;
                // NOTE(review): 'remain' is computed but unused in this
                // variant; the fixed-width tail asm below presumably covers
                // it — confirm outw & 7 == 4 holds for all callers.
                int remain = outw & 7;
                // Load both 3x3 kernels into d26-d29 (q13/q14).
                // NOTE(review): the loops below rely on d26-d29 keeping these
                // values across SEPARATE asm statements; that is not
                // guaranteed by the extended-asm contract and only works if
                // the compiler does not allocate q13/q14 in between — fragile,
                // verify on each toolchain.
                asm volatile(
                    "vld1.8 {d26-d27}, [%0] \n" // k00 k01 k02 k03 k04 k05 k06 k07 k08
                    "vld1.8 {d28-d29}, [%1] \n" // k10 k11 k12 k13 k14 k15 k16 k17 k18
                    : "=r"(kernel0), // %0
                    "=r"(kernel1) // %1
                    : "0"(kernel0),
                    "1"(kernel1)
                    : "cc", "memory"
                );
                if (nn > 0)
                {
                    // Main loop: 8 output columns x 2 rows x 2 channels per
                    // iteration. Pattern per tap: dup the kernel byte, widening
                    // s8 multiply-accumulate into q2 (s16), then widen-add the
                    // two s16 halves into the int32 sums loaded from memory.
                    // NOTE(review): clobber list omits q13/q14 (the kernel
                    // registers read here) — relies on them being inputs-by-
                    // convention from the previous asm statement.
                    asm volatile(
                        "0: \n"
                        "pld [%5, #128] \n"
                        "vld1.32 {d0-d1}, [%5] \n"// r0
                        "add %5, #8 \n"
                        "vext.8 d2, d0, d1, #1 \n"
                        "vext.8 d3, d0, d1, #2 \n"
                        "vdup.s8 d1, d26[0] \n"
                        "vdup.s8 d30, d26[1] \n"
                        "vdup.s8 d31, d26[2] \n"
                        "vmull.s8 q2, d0, d1 \n"// k0
                        "vmlal.s8 q2, d2, d30 \n"// k1
                        "vmlal.s8 q2, d3, d31 \n"// k2
                        "pld [%6, #128] \n"
                        "vld1.32 {d6-d7}, [%6] \n"// r1
                        "add %6, #8 \n"
                        "vext.8 d8, d6, d7, #1 \n"
                        "vext.8 d9, d6, d7, #2 \n"
                        "vdup.s8 d1, d26[3] \n"
                        "vdup.s8 d30, d26[4] \n"
                        "vdup.s8 d31, d26[5] \n"
                        "vmlal.s8 q2, d6, d1 \n"// k3
                        "vmlal.s8 q2, d8, d30 \n"// k4
                        "vmlal.s8 q2, d9, d31 \n"// k5
                        "pld [%7, #128] \n"
                        "vld1.32 {d10-d11}, [%7] \n"// r2
                        "add %7, #8 \n"
                        "vext.8 d12, d10, d11, #1 \n"
                        "vext.8 d13, d10, d11, #2 \n"
                        "vdup.s8 d1, d26[6] \n"
                        "vdup.s8 d30, d26[7] \n"
                        "vdup.s8 d31, d27[0] \n"
                        "vmlal.s8 q2, d10, d1 \n"// k6
                        "vmlal.s8 q2, d12, d30 \n"// k7
                        "vmlal.s8 q2, d13, d31 \n"// k8
                        "pld [%8, #128] \n"
                        "vld1.32 {d14-d15}, [%8] \n"// r3
                        "add %8, #8 \n"
                        "vext.8 d16, d14, d15, #1 \n"
                        "vext.8 d17, d14, d15, #2 \n"
                        "pld [%1, #128] \n"
                        "vld1.32 {d18-d21}, [%1] \n"// sum0
                        "vaddw.s16 q9, q9, d4 \n"
                        "vaddw.s16 q10, q10, d5 \n"
                        "vst1.32 {d18-d21}, [%1]! \n"
                        "vdup.s8 d1, d26[0] \n"
                        "vdup.s8 d30, d26[1] \n"
                        "vdup.s8 d31, d26[2] \n"
                        "vmull.s8 q2, d6, d1 \n"// k0
                        "vmlal.s8 q2, d8, d30 \n"// k1
                        "vmlal.s8 q2, d9, d31 \n"// k2
                        "vdup.s8 d1, d26[3] \n"
                        "vdup.s8 d30, d26[4] \n"
                        "vdup.s8 d31, d26[5] \n"
                        "vmlal.s8 q2, d10, d1 \n"// k3
                        "vmlal.s8 q2, d12, d30 \n"// k4
                        "vmlal.s8 q2, d13, d31 \n"// k5
                        "vdup.s8 d1, d26[6] \n"
                        "vdup.s8 d30, d26[7] \n"
                        "vdup.s8 d31, d27[0] \n"
                        "vmlal.s8 q2, d14, d1 \n"// k6
                        "vmlal.s8 q2, d16, d30 \n"// k7
                        "vmlal.s8 q2, d17, d31 \n"// k8
                        "pld [%2, #128] \n"
                        "vld1.32 {d18-d21}, [%2] \n"// sum0n
                        "vaddw.s16 q9, q9, d4 \n"
                        "vaddw.s16 q10, q10, d5 \n"
                        "vst1.32 {d18-d21}, [%2]! \n"
                        "vdup.s8 d1, d28[0] \n"
                        "vdup.s8 d30, d28[1] \n"
                        "vdup.s8 d31, d28[2] \n"
                        "vmull.s8 q2, d0, d1 \n"// k0n
                        "vmlal.s8 q2, d2, d30 \n"// k1n
                        "vmlal.s8 q2, d3, d31 \n"// k2n
                        "vdup.s8 d1, d28[3] \n"
                        "vdup.s8 d30, d28[4] \n"
                        "vdup.s8 d31, d28[5] \n"
                        "vmlal.s8 q2, d6, d1 \n"// k3n
                        "vmlal.s8 q2, d8, d30 \n"// k4n
                        "vmlal.s8 q2, d9, d31 \n"// k5n
                        "vdup.s8 d1, d28[6] \n"
                        "vdup.s8 d30, d28[7] \n"
                        "vdup.s8 d31, d29[0] \n"
                        "vmlal.s8 q2, d10, d1 \n"// k6n
                        "vmlal.s8 q2, d12, d30 \n"// k7n
                        "vmlal.s8 q2, d13, d31 \n"// k8n
                        "pld [%3, #128] \n"
                        "vld1.32 {d18-d21}, [%3] \n"// sum1
                        "vaddw.s16 q9, q9, d4 \n"
                        "vaddw.s16 q10, q10, d5 \n"
                        "vst1.32 {d18-d21}, [%3]! \n"
                        "vdup.s8 d1, d28[0] \n"
                        "vdup.s8 d30, d28[1] \n"
                        "vdup.s8 d31, d28[2] \n"
                        "vmull.s8 q2, d6, d1 \n"// k0n
                        "vmlal.s8 q2, d8, d30 \n"// k1n
                        "vmlal.s8 q2, d9, d31 \n"// k2n
                        "vdup.s8 d1, d28[3] \n"
                        "vdup.s8 d30, d28[4] \n"
                        "vdup.s8 d31, d28[5] \n"
                        "vmlal.s8 q2, d10, d1 \n"// k3n
                        "vmlal.s8 q2, d12, d30 \n"// k4n
                        "vmlal.s8 q2, d13, d31 \n"// k5n
                        "vdup.s8 d1, d28[6] \n"
                        "vdup.s8 d30, d28[7] \n"
                        "vdup.s8 d31, d29[0] \n"
                        "vmlal.s8 q2, d14, d1 \n"// k6n
                        "vmlal.s8 q2, d16, d30 \n"// k7n
                        "vmlal.s8 q2, d17, d31 \n"// k8n
                        "pld [%4, #128] \n"
                        "vld1.32 {d18-d21}, [%4] \n"// sum1n
                        "vaddw.s16 q9, q9, d4 \n"
                        "vaddw.s16 q10, q10, d5 \n"
                        "vst1.32 {d18-d21}, [%4]! \n"
                        "subs %0, #1 \n"
                        "bne 0b \n"
                        : "=r"(nn), // %0
                        "=r"(outptr0), // %1
                        "=r"(outptr0n), // %2
                        "=r"(outptr1), // %3
                        "=r"(outptr1n), // %4
                        "=r"(r0), // %5
                        "=r"(r1), // %6
                        "=r"(r2), // %7
                        "=r"(r3) // %8
                        : "0"(nn),
                        "1"(outptr0),
                        "2"(outptr0n),
                        "3"(outptr1),
                        "4"(outptr1n),
                        "5"(r0),
                        "6"(r1),
                        "7"(r2),
                        "8"(r3)
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q15"
                    );
                }
                // Tail: same computation for the last 4 output columns of both
                // rows (row pointers advance by 4, sums use d18-d19 only —
                // 4 x int32 per store).
                asm volatile(
                    "pld [%5, #128] \n"
                    "vld1.32 {d0-d1}, [%5] \n"// r0
                    "add %5, #4 \n"
                    "vext.8 d2, d0, d1, #1 \n"
                    "vext.8 d3, d0, d1, #2 \n"
                    "vdup.s8 d1, d26[0] \n"
                    "vdup.s8 d30, d26[1] \n"
                    "vdup.s8 d31, d26[2] \n"
                    "vmull.s8 q2, d0, d1 \n"// k0
                    "vmlal.s8 q2, d2, d30 \n"// k1
                    "vmlal.s8 q2, d3, d31 \n"// k2
                    "pld [%6, #128] \n"
                    "vld1.32 {d6-d7}, [%6] \n"// r1
                    "add %6, #4 \n"
                    "vext.8 d8, d6, d7, #1 \n"
                    "vext.8 d9, d6, d7, #2 \n"
                    "vdup.s8 d1, d26[3] \n"
                    "vdup.s8 d30, d26[4] \n"
                    "vdup.s8 d31, d26[5] \n"
                    "vmlal.s8 q2, d6, d1 \n"// k3
                    "vmlal.s8 q2, d8, d30 \n"// k4
                    "vmlal.s8 q2, d9, d31 \n"// k5
                    "pld [%7, #128] \n"
                    "vld1.32 {d10-d11}, [%7] \n"// r2
                    "add %7, #4 \n"
                    "vext.8 d12, d10, d11, #1 \n"
                    "vext.8 d13, d10, d11, #2 \n"
                    "vdup.s8 d1, d26[6] \n"
                    "vdup.s8 d30, d26[7] \n"
                    "vdup.s8 d31, d27[0] \n"
                    "vmlal.s8 q2, d10, d1 \n"// k6
                    "vmlal.s8 q2, d12, d30 \n"// k7
                    "vmlal.s8 q2, d13, d31 \n"// k8
                    "pld [%8, #128] \n"
                    "vld1.32 {d14-d15}, [%8] \n"// r3
                    "add %8, #4 \n"
                    "vext.8 d16, d14, d15, #1 \n"
                    "vext.8 d17, d14, d15, #2 \n"
                    "pld [%1, #128] \n"
                    "vld1.32 {d18-d19}, [%1] \n"// sum0
                    "vaddw.s16 q9, q9, d4 \n"
                    "vst1.32 {d18-d19}, [%1]! \n"
                    "vdup.s8 d1, d26[0] \n"
                    "vdup.s8 d30, d26[1] \n"
                    "vdup.s8 d31, d26[2] \n"
                    "vmull.s8 q2, d6, d1 \n"// k0
                    "vmlal.s8 q2, d8, d30 \n"// k1
                    "vmlal.s8 q2, d9, d31 \n"// k2
                    "vdup.s8 d1, d26[3] \n"
                    "vdup.s8 d30, d26[4] \n"
                    "vdup.s8 d31, d26[5] \n"
                    "vmlal.s8 q2, d10, d1 \n"// k3
                    "vmlal.s8 q2, d12, d30 \n"// k4
                    "vmlal.s8 q2, d13, d31 \n"// k5
                    "vdup.s8 d1, d26[6] \n"
                    "vdup.s8 d30, d26[7] \n"
                    "vdup.s8 d31, d27[0] \n"
                    "vmlal.s8 q2, d14, d1 \n"// k6
                    "vmlal.s8 q2, d16, d30 \n"// k7
                    "vmlal.s8 q2, d17, d31 \n"// k8
                    "pld [%2, #128] \n"
                    "vld1.32 {d18-d19}, [%2] \n"// sum0n
                    "vaddw.s16 q9, q9, d4 \n"
                    "vst1.32 {d18-d19}, [%2]! \n"
                    "vdup.s8 d1, d28[0] \n"
                    "vdup.s8 d30, d28[1] \n"
                    "vdup.s8 d31, d28[2] \n"
                    "vmull.s8 q2, d0, d1 \n"// k0n
                    "vmlal.s8 q2, d2, d30 \n"// k1n
                    "vmlal.s8 q2, d3, d31 \n"// k2n
                    "vdup.s8 d1, d28[3] \n"
                    "vdup.s8 d30, d28[4] \n"
                    "vdup.s8 d31, d28[5] \n"
                    "vmlal.s8 q2, d6, d1 \n"// k3n
                    "vmlal.s8 q2, d8, d30 \n"// k4n
                    "vmlal.s8 q2, d9, d31 \n"// k5n
                    "vdup.s8 d1, d28[6] \n"
                    "vdup.s8 d30, d28[7] \n"
                    "vdup.s8 d31, d29[0] \n"
                    "vmlal.s8 q2, d10, d1 \n"// k6n
                    "vmlal.s8 q2, d12, d30 \n"// k7n
                    "vmlal.s8 q2, d13, d31 \n"// k8n
                    "pld [%3, #128] \n"
                    "vld1.32 {d18-d19}, [%3] \n"// sum1
                    "vaddw.s16 q9, q9, d4 \n"
                    "vst1.32 {d18-d19}, [%3]! \n"
                    "vdup.s8 d1, d28[0] \n"
                    "vdup.s8 d30, d28[1] \n"
                    "vdup.s8 d31, d28[2] \n"
                    "vmull.s8 q2, d6, d1 \n"// k0n
                    "vmlal.s8 q2, d8, d30 \n"// k1n
                    "vmlal.s8 q2, d9, d31 \n"// k2n
                    "vdup.s8 d1, d28[3] \n"
                    "vdup.s8 d30, d28[4] \n"
                    "vdup.s8 d31, d28[5] \n"
                    "vmlal.s8 q2, d10, d1 \n"// k3n
                    "vmlal.s8 q2, d12, d30 \n"// k4n
                    "vmlal.s8 q2, d13, d31 \n"// k5n
                    "vdup.s8 d1, d28[6] \n"
                    "vdup.s8 d30, d28[7] \n"
                    "vdup.s8 d31, d29[0] \n"
                    "vmlal.s8 q2, d14, d1 \n"// k6n
                    "vmlal.s8 q2, d16, d30 \n"// k7n
                    "vmlal.s8 q2, d17, d31 \n"// k8n
                    "pld [%4, #128] \n"
                    "vld1.32 {d18-d19}, [%4] \n"// sum1n
                    "vaddw.s16 q9, q9, d4 \n"
                    "vst1.32 {d18-d19}, [%4]! \n"
                    : "=r"(nn), // %0
                    "=r"(outptr0), // %1
                    "=r"(outptr0n), // %2
                    "=r"(outptr1), // %3
                    "=r"(outptr1n), // %4
                    "=r"(r0), // %5
                    "=r"(r1), // %6
                    "=r"(r2), // %7
                    "=r"(r3) // %8
                    : "0"(nn),
                    "1"(outptr0),
                    "2"(outptr0n),
                    "3"(outptr1),
                    "4"(outptr1n),
                    "5"(r0),
                    "6"(r1),
                    "7"(r2),
                    "8"(r3)
                    : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q15"
                );
                // Advance: +2 skips the window overhang, +w skips the extra
                // input row consumed by the two-row scheme; output pointers
                // were already advanced by outw inside the asm, so one more
                // outw lands them on the next row pair.
                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;
                outptr0 += outw;
                outptr1 += outw;
                outptr0n += outw;
                outptr1n += outw;
            }
            // Remainder: last single output row (when outh is odd); uses only
            // r0..r2 and one sum row per channel.
            for (; i < outh; i++)
            {
                int nn = outw >> 3;
                int remain = outw & 7;
                asm volatile(
                    "vld1.8 {d26-d27}, [%0] \n"
                    "vld1.8 {d28-d29}, [%1] \n"
                    : "=r"(kernel0), // %0
                    "=r"(kernel1) // %1
                    : "0"(kernel0),
                    "1"(kernel1)
                    : "cc", "memory"
                );
                if (nn > 0)
                {
                    asm volatile(
                        "0: \n"
                        "pld [%3, #128] \n"
                        "vld1.32 {d0-d1}, [%3] \n"// r0
                        "add %3, #8 \n"
                        "vext.8 d2, d0, d1, #1 \n"
                        "vext.8 d3, d0, d1, #2 \n"
                        "vdup.s8 d1, d26[0] \n"
                        "vdup.s8 d30, d26[1] \n"
                        "vdup.s8 d31, d26[2] \n"
                        "vmull.s8 q2, d0, d1 \n"// k0
                        "vmlal.s8 q2, d2, d30 \n"// k1
                        "vmlal.s8 q2, d3, d31 \n"// k2
                        "pld [%4, #128] \n"
                        "vld1.32 {d6-d7}, [%4] \n"// r1
                        "add %4, #8 \n"
                        "vext.8 d8, d6, d7, #1 \n"
                        "vext.8 d9, d6, d7, #2 \n"
                        "vdup.s8 d1, d26[3] \n"
                        "vdup.s8 d30, d26[4] \n"
                        "vdup.s8 d31, d26[5] \n"
                        "vmlal.s8 q2, d6, d1 \n"// k3
                        "vmlal.s8 q2, d8, d30 \n"// k4
                        "vmlal.s8 q2, d9, d31 \n"// k5
                        "pld [%5, #128] \n"
                        "vld1.32 {d10-d11}, [%5] \n"// r2
                        "add %5, #8 \n"
                        "vext.8 d12, d10, d11, #1 \n"
                        "vext.8 d13, d10, d11, #2 \n"
                        "vdup.s8 d1, d26[6] \n"
                        "vdup.s8 d30, d26[7] \n"
                        "vdup.s8 d31, d27[0] \n"
                        "vmlal.s8 q2, d10, d1 \n"// k6
                        "vmlal.s8 q2, d12, d30 \n"// k7
                        "vmlal.s8 q2, d13, d31 \n"// k8
                        "pld [%1, #128] \n"
                        "vld1.32 {d18-d21}, [%1] \n"// sum0
                        "vaddw.s16 q9, q9, d4 \n"
                        "vaddw.s16 q10, q10, d5 \n"
                        "vst1.32 {d18-d21}, [%1]! \n"
                        "vdup.s8 d1, d28[0] \n"
                        "vdup.s8 d7, d28[1] \n"
                        "vdup.s8 d11, d28[2] \n"
                        "vmull.s8 q2, d0, d1 \n"// k0n
                        "vmlal.s8 q2, d2, d7 \n"// k1n
                        "vmlal.s8 q2, d3, d11 \n"// k2n
                        "vdup.s8 d1, d28[3] \n"
                        "vdup.s8 d7, d28[4] \n"
                        "vdup.s8 d11, d28[5] \n"
                        "vmlal.s8 q2, d6, d1 \n"// k3n
                        "vmlal.s8 q2, d8, d7 \n"// k4n
                        "vmlal.s8 q2, d9, d11 \n"// k5n
                        "vdup.s8 d1, d28[6] \n"
                        "vdup.s8 d7, d28[7] \n"
                        "vdup.s8 d11, d29[0] \n"
                        "vmlal.s8 q2, d10, d1 \n"// k6n
                        "vmlal.s8 q2, d12, d7 \n"// k7n
                        "vmlal.s8 q2, d13, d11 \n"// k8n
                        "pld [%2, #128] \n"
                        "vld1.32 {d18-d21}, [%2] \n"// sum1
                        "vaddw.s16 q9, q9, d4 \n"
                        "vaddw.s16 q10, q10, d5 \n"
                        "vst1.32 {d18-d21}, [%2]! \n"
                        "subs %0, #1 \n"
                        "bne 0b \n"
                        : "=r"(nn), // %0
                        "=r"(outptr0), // %1
                        "=r"(outptr1), // %2
                        "=r"(r0), // %3
                        "=r"(r1), // %4
                        "=r"(r2) // %5
                        : "0"(nn),
                        "1"(outptr0),
                        "2"(outptr1),
                        "3"(r0),
                        "4"(r1),
                        "5"(r2)
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
                }
                // Tail: last 4 output columns for the single-row path.
                asm volatile(
                    "pld [%3, #128] \n"
                    "vld1.32 {d0-d1}, [%3] \n"// r0
                    "add %3, #4 \n"
                    "vext.8 d2, d0, d1, #1 \n"
                    "vext.8 d3, d0, d1, #2 \n"
                    "vdup.s8 d1, d26[0] \n"
                    "vdup.s8 d30, d26[1] \n"
                    "vdup.s8 d31, d26[2] \n"
                    "vmull.s8 q2, d0, d1 \n"// k0
                    "vmlal.s8 q2, d2, d30 \n"// k1
                    "vmlal.s8 q2, d3, d31 \n"// k2
                    "pld [%4, #128] \n"
                    "vld1.32 {d6-d7}, [%4] \n"// r1
                    "add %4, #4 \n"
                    "vext.8 d8, d6, d7, #1 \n"
                    "vext.8 d9, d6, d7, #2 \n"
                    "vdup.s8 d1, d26[3] \n"
                    "vdup.s8 d30, d26[4] \n"
                    "vdup.s8 d31, d26[5] \n"
                    "vmlal.s8 q2, d6, d1 \n"// k3
                    "vmlal.s8 q2, d8, d30 \n"// k4
                    "vmlal.s8 q2, d9, d31 \n"// k5
                    "pld [%5, #128] \n"
                    "vld1.32 {d10-d11}, [%5] \n"// r2
                    "add %5, #4 \n"
                    "vext.8 d12, d10, d11, #1 \n"
                    "vext.8 d13, d10, d11, #2 \n"
                    "vdup.s8 d1, d26[6] \n"
                    "vdup.s8 d30, d26[7] \n"
                    "vdup.s8 d31, d27[0] \n"
                    "vmlal.s8 q2, d10, d1 \n"// k6
                    "vmlal.s8 q2, d12, d30 \n"// k7
                    "vmlal.s8 q2, d13, d31 \n"// k8
                    "pld [%1, #128] \n"
                    "vld1.32 {d18-d19}, [%1] \n"// sum0
                    "vaddw.s16 q9, q9, d4 \n"
                    "vst1.32 {d18-d19}, [%1]! \n"
                    "vdup.s8 d1, d28[0] \n"
                    "vdup.s8 d7, d28[1] \n"
                    "vdup.s8 d11, d28[2] \n"
                    "vmull.s8 q2, d0, d1 \n"// k0n
                    "vmlal.s8 q2, d2, d7 \n"// k1n
                    "vmlal.s8 q2, d3, d11 \n"// k2n
                    "vdup.s8 d1, d28[3] \n"
                    "vdup.s8 d7, d28[4] \n"
                    "vdup.s8 d11, d28[5] \n"
                    "vmlal.s8 q2, d6, d1 \n"// k3n
                    "vmlal.s8 q2, d8, d7 \n"// k4n
                    "vmlal.s8 q2, d9, d11 \n"// k5n
                    "vdup.s8 d1, d28[6] \n"
                    "vdup.s8 d7, d28[7] \n"
                    "vdup.s8 d11, d29[0] \n"
                    "vmlal.s8 q2, d10, d1 \n"// k6n
                    "vmlal.s8 q2, d12, d7 \n"// k7n
                    "vmlal.s8 q2, d13, d11 \n"// k8n
                    "pld [%2, #128] \n"
                    "vld1.32 {d18-d19}, [%2] \n"// sum1
                    "vaddw.s16 q9, q9, d4 \n"
                    "vst1.32 {d18-d19}, [%2]! \n"
                    : "=r"(nn), // %0
                    "=r"(outptr0), // %1
                    "=r"(outptr1), // %2
                    "=r"(r0), // %3
                    "=r"(r1), // %4
                    "=r"(r2) // %5
                    : "0"(nn),
                    "1"(outptr0),
                    "2"(outptr1),
                    "3"(r0),
                    "4"(r1),
                    "5"(r2)
                    : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
            // Next input channel's 3x3 weights.
            kernel0 += 9;
            kernel1 += 9;
        }
    }
    // Leftover output channels (outch odd): single-channel version of the
    // same row-pair / single-row scheme above.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=remain_outch_start; p<outch; p++)
    {
        Mat out0 = top_blob.channel(p);
        out0.fill(0);
        const signed char* kernel0 = (const signed char *)kernel + p * inch * 9;
        for (int q=0; q<inch; q++)
        {
            int* outptr0 = out0;
            int* outptr0n = outptr0 + outw;
            const signed char* img0 = bottom_blob.channel(q);
            const signed char* r0 = img0;
            const signed char* r1 = img0 + w;
            const signed char* r2 = img0 + w*2;
            const signed char* r3 = img0 + w*3;
            // NOTE(review): k00/k03/k06 unused here as well.
            const signed char* k00 = kernel0;
            const signed char* k03 = kernel0 + 3;
            const signed char* k06 = kernel0 + 6;
            int i = 0;
            for (; i+1 < outh; i+=2)
            {
                int nn = outw >> 3;
                int remain = outw & 7;
                // Load the single 3x3 kernel into d26-d27 (same cross-asm
                // register dependence caveat as above).
                asm volatile(
                    "vld1.8 {d26-d27}, [%0] \n"
                    : "=r"(kernel0) // %0
                    : "0"(kernel0)
                    : "cc", "memory"
                );
                if (nn > 0)
                {
                    asm volatile(
                        "0: \n"
                        "pld [%3, #128] \n"
                        "vld1.32 {d0-d1}, [%3] \n"// r0
                        "add %3, #8 \n"
                        "vext.8 d2, d0, d1, #1 \n"
                        "vext.8 d3, d0, d1, #2 \n"
                        "vdup.s8 d1, d26[0] \n"
                        "vdup.s8 d30, d26[1] \n"
                        "vdup.s8 d31, d26[2] \n"
                        "vmull.s8 q2, d0, d1 \n"// k0
                        "vmlal.s8 q2, d2, d30 \n"// k1
                        "vmlal.s8 q2, d3, d31 \n"// k2
                        "pld [%4, #128] \n"
                        "vld1.32 {d6-d7}, [%4] \n"// r1
                        "add %4, #8 \n"
                        "vext.8 d8, d6, d7, #1 \n"
                        "vext.8 d9, d6, d7, #2 \n"
                        "vdup.s8 d1, d26[3] \n"
                        "vdup.s8 d30, d26[4] \n"
                        "vdup.s8 d31, d26[5] \n"
                        "vmlal.s8 q2, d6, d1 \n"// k3
                        "vmlal.s8 q2, d8, d30 \n"// k4
                        "vmlal.s8 q2, d9, d31 \n"// k5
                        "pld [%5, #128] \n"
                        "vld1.32 {d10-d11}, [%5] \n"// r2
                        "add %5, #8 \n"
                        "vext.8 d12, d10, d11, #1 \n"
                        "vext.8 d13, d10, d11, #2 \n"
                        "vdup.s8 d1, d26[6] \n"
                        "vdup.s8 d30, d26[7] \n"
                        "vdup.s8 d31, d27[0] \n"
                        "vmlal.s8 q2, d10, d1 \n"// k6
                        "vmlal.s8 q2, d12, d30 \n"// k7
                        "vmlal.s8 q2, d13, d31 \n"// k8
                        "pld [%6, #128] \n"
                        "vld1.32 {d14-d15}, [%6] \n"// r3
                        "add %6, #8 \n"
                        "vext.8 d16, d14, d15, #1 \n"
                        "vext.8 d17, d14, d15, #2 \n"
                        "pld [%1, #128] \n"
                        "vld1.32 {d18-d21}, [%1] \n"// sum0
                        "vaddw.s16 q9, q9, d4 \n"
                        "vaddw.s16 q10, q10, d5 \n"
                        "vst1.32 {d18-d21}, [%1]! \n"
                        "vdup.s8 d1, d26[0] \n"
                        "vdup.s8 d30, d26[1] \n"
                        "vdup.s8 d31, d26[2] \n"
                        "vmull.s8 q2, d6, d1 \n"// k0
                        "vmlal.s8 q2, d8, d30 \n"// k1
                        "vmlal.s8 q2, d9, d31 \n"// k2
                        "vdup.s8 d1, d26[3] \n"
                        "vdup.s8 d30, d26[4] \n"
                        "vdup.s8 d31, d26[5] \n"
                        "vmlal.s8 q2, d10, d1 \n"// k3
                        "vmlal.s8 q2, d12, d30 \n"// k4
                        "vmlal.s8 q2, d13, d31 \n"// k5
                        "vdup.s8 d1, d26[6] \n"
                        "vdup.s8 d30, d26[7] \n"
                        "vdup.s8 d31, d27[0] \n"
                        "vmlal.s8 q2, d14, d1 \n"// k6
                        "vmlal.s8 q2, d16, d30 \n"// k7
                        "vmlal.s8 q2, d17, d31 \n"// k8
                        "pld [%2, #128] \n"
                        "vld1.32 {d18-d21}, [%2] \n"// sum0n
                        "vaddw.s16 q9, q9, d4 \n"
                        "vaddw.s16 q10, q10, d5 \n"
                        "vst1.32 {d18-d21}, [%2]! \n"
                        "subs %0, #1 \n"
                        "bne 0b \n"
                        : "=r"(nn), // %0
                        "=r"(outptr0), // %1
                        "=r"(outptr0n), // %2
                        "=r"(r0), // %3
                        "=r"(r1), // %4
                        "=r"(r2), // %5
                        "=r"(r3) // %6
                        : "0"(nn),
                        "1"(outptr0),
                        "2"(outptr0n),
                        "3"(r0),
                        "4"(r1),
                        "5"(r2),
                        "6"(r3)
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
                }
                // Tail: last 4 output columns for both rows, single channel.
                asm volatile(
                    "pld [%3, #128] \n"
                    "vld1.32 {d0-d1}, [%3] \n"// r0
                    "add %3, #4 \n"
                    "vext.8 d2, d0, d1, #1 \n"
                    "vext.8 d3, d0, d1, #2 \n"
                    "vdup.s8 d1, d26[0] \n"
                    "vdup.s8 d30, d26[1] \n"
                    "vdup.s8 d31, d26[2] \n"
                    "vmull.s8 q2, d0, d1 \n"// k0
                    "vmlal.s8 q2, d2, d30 \n"// k1
                    "vmlal.s8 q2, d3, d31 \n"// k2
                    "pld [%4, #128] \n"
                    "vld1.32 {d6-d7}, [%4] \n"// r1
                    "add %4, #4 \n"
                    "vext.8 d8, d6, d7, #1 \n"
                    "vext.8 d9, d6, d7, #2 \n"
                    "vdup.s8 d1, d26[3] \n"
                    "vdup.s8 d30, d26[4] \n"
                    "vdup.s8 d31, d26[5] \n"
                    "vmlal.s8 q2, d6, d1 \n"// k3
                    "vmlal.s8 q2, d8, d30 \n"// k4
                    "vmlal.s8 q2, d9, d31 \n"// k5
                    "pld [%5, #128] \n"
                    "vld1.32 {d10-d11}, [%5] \n"// r2
                    "add %5, #4 \n"
                    "vext.8 d12, d10, d11, #1 \n"
                    "vext.8 d13, d10, d11, #2 \n"
                    "vdup.s8 d1, d26[6] \n"
                    "vdup.s8 d30, d26[7] \n"
                    "vdup.s8 d31, d27[0] \n"
                    "vmlal.s8 q2, d10, d1 \n"// k6
                    "vmlal.s8 q2, d12, d30 \n"// k7
                    "vmlal.s8 q2, d13, d31 \n"// k8
                    "pld [%6, #128] \n"
                    "vld1.32 {d14-d15}, [%6] \n"// r3
                    "add %6, #4 \n"
                    "vext.8 d16, d14, d15, #1 \n"
                    "vext.8 d17, d14, d15, #2 \n"
                    "pld [%1, #128] \n"
                    "vld1.32 {d18-d19}, [%1] \n"// sum0
                    "vaddw.s16 q9, q9, d4 \n"
                    "vst1.32 {d18-d19}, [%1]! \n"
                    "vdup.s8 d1, d26[0] \n"
                    "vdup.s8 d30, d26[1] \n"
                    "vdup.s8 d31, d26[2] \n"
                    "vmull.s8 q2, d6, d1 \n"// k0
                    "vmlal.s8 q2, d8, d30 \n"// k1
                    "vmlal.s8 q2, d9, d31 \n"// k2
                    "vdup.s8 d1, d26[3] \n"
                    "vdup.s8 d30, d26[4] \n"
                    "vdup.s8 d31, d26[5] \n"
                    "vmlal.s8 q2, d10, d1 \n"// k3
                    "vmlal.s8 q2, d12, d30 \n"// k4
                    "vmlal.s8 q2, d13, d31 \n"// k5
                    "vdup.s8 d1, d26[6] \n"
                    "vdup.s8 d30, d26[7] \n"
                    "vdup.s8 d31, d27[0] \n"
                    "vmlal.s8 q2, d14, d1 \n"// k6
                    "vmlal.s8 q2, d16, d30 \n"// k7
                    "vmlal.s8 q2, d17, d31 \n"// k8
                    "pld [%2, #128] \n"
                    "vld1.32 {d18-d19}, [%2] \n"// sum0n
                    "vaddw.s16 q9, q9, d4 \n"
                    "vst1.32 {d18-d19}, [%2]! \n"
                    : "=r"(nn), // %0
                    "=r"(outptr0), // %1
                    "=r"(outptr0n), // %2
                    "=r"(r0), // %3
                    "=r"(r1), // %4
                    "=r"(r2), // %5
                    "=r"(r3) // %6
                    : "0"(nn),
                    "1"(outptr0),
                    "2"(outptr0n),
                    "3"(r0),
                    "4"(r1),
                    "5"(r2),
                    "6"(r3)
                    : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;
                outptr0 += outw;
                outptr0n += outw;
            }
            // Remainder: last single output row, single channel.
            for (; i < outh; i++)
            {
                int nn = outw >> 3;
                int remain = outw & 7;
                asm volatile(
                    "vld1.8 {d26-d27}, [%0] \n"
                    : "=r"(kernel0) // %0
                    : "0"(kernel0)
                    : "cc", "memory"
                );
                if (nn > 0)
                {
                    asm volatile(
                        "0: \n"
                        "pld [%2, #128] \n"
                        "vld1.32 {d0-d1}, [%2] \n"// r0
                        "add %2, #8 \n"
                        "vext.8 d2, d0, d1, #1 \n"
                        "vext.8 d3, d0, d1, #2 \n"
                        "vdup.s8 d1, d26[0] \n"
                        "vdup.s8 d30, d26[1] \n"
                        "vdup.s8 d31, d26[2] \n"
                        "vmull.s8 q2, d0, d1 \n"// k0
                        "vmlal.s8 q2, d2, d30 \n"// k1
                        "vmlal.s8 q2, d3, d31 \n"// k2
                        "pld [%3, #128] \n"
                        "vld1.32 {d6-d7}, [%3] \n"// r1
                        "add %3, #8 \n"
                        "vext.8 d8, d6, d7, #1 \n"
                        "vext.8 d9, d6, d7, #2 \n"
                        "vdup.s8 d1, d26[3] \n"
                        "vdup.s8 d30, d26[4] \n"
                        "vdup.s8 d31, d26[5] \n"
                        "vmlal.s8 q2, d6, d1 \n"// k3
                        "vmlal.s8 q2, d8, d30 \n"// k4
                        "vmlal.s8 q2, d9, d31 \n"// k5
                        "pld [%4, #128] \n"
                        "vld1.32 {d10-d11}, [%4] \n"// r2
                        "add %4, #8 \n"
                        "vext.8 d12, d10, d11, #1 \n"
                        "vext.8 d13, d10, d11, #2 \n"
                        "vdup.s8 d1, d26[6] \n"
                        "vdup.s8 d30, d26[7] \n"
                        "vdup.s8 d31, d27[0] \n"
                        "vmlal.s8 q2, d10, d1 \n"// k6
                        "vmlal.s8 q2, d12, d30 \n"// k7
                        "vmlal.s8 q2, d13, d31 \n"// k8
                        "pld [%1, #128] \n"
                        "vld1.32 {d18-d21}, [%1] \n"// sum0
                        "vaddw.s16 q9, q9, d4 \n"
                        "vaddw.s16 q10, q10, d5 \n"
                        "vst1.32 {d18-d21}, [%1]! \n"
                        "subs %0, #1 \n"
                        "bne 0b \n"
                        : "=r"(nn), // %0
                        "=r"(outptr0), // %1
                        "=r"(r0), // %2
                        "=r"(r1), // %3
                        "=r"(r2) // %4
                        : "0"(nn),
                        "1"(outptr0),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2)
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
                }
                // Tail: last 4 output columns, single row, single channel.
                asm volatile(
                    "pld [%2, #128] \n"
                    "vld1.32 {d0-d1}, [%2] \n"// r0
                    "add %2, #4 \n"
                    "vext.8 d2, d0, d1, #1 \n"
                    "vext.8 d3, d0, d1, #2 \n"
                    "vdup.s8 d1, d26[0] \n"
                    "vdup.s8 d30, d26[1] \n"
                    "vdup.s8 d31, d26[2] \n"
                    "vmull.s8 q2, d0, d1 \n"// k0
                    "vmlal.s8 q2, d2, d30 \n"// k1
                    "vmlal.s8 q2, d3, d31 \n"// k2
                    "pld [%3, #128] \n"
                    "vld1.32 {d6-d7}, [%3] \n"// r1
                    "add %3, #4 \n"
                    "vext.8 d8, d6, d7, #1 \n"
                    "vext.8 d9, d6, d7, #2 \n"
                    "vdup.s8 d1, d26[3] \n"
                    "vdup.s8 d30, d26[4] \n"
                    "vdup.s8 d31, d26[5] \n"
                    "vmlal.s8 q2, d6, d1 \n"// k3
                    "vmlal.s8 q2, d8, d30 \n"// k4
                    "vmlal.s8 q2, d9, d31 \n"// k5
                    "pld [%4, #128] \n"
                    "vld1.32 {d10-d11}, [%4] \n"// r2
                    "add %4, #4 \n"
                    "vext.8 d12, d10, d11, #1 \n"
                    "vext.8 d13, d10, d11, #2 \n"
                    "vdup.s8 d1, d26[6] \n"
                    "vdup.s8 d30, d26[7] \n"
                    "vdup.s8 d31, d27[0] \n"
                    "vmlal.s8 q2, d10, d1 \n"// k6
                    "vmlal.s8 q2, d12, d30 \n"// k7
                    "vmlal.s8 q2, d13, d31 \n"// k8
                    "pld [%1, #128] \n"
                    "vld1.32 {d18-d19}, [%1] \n"// sum0
                    "vaddw.s16 q9, q9, d4 \n"
                    "vst1.32 {d18-d19}, [%1]! \n"
                    : "=r"(nn), // %0
                    "=r"(outptr0), // %1
                    "=r"(r0), // %2
                    "=r"(r1), // %3
                    "=r"(r2) // %4
                    : "0"(nn),
                    "1"(outptr0),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2)
                    : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
            kernel0 += 9;
        }
    }
}
static void conv3x3s1_neon_s8_left6(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* kernel = _kernel;
int nn_outch = outch >> 1;
int remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp < nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p+1);
out0.fill(0);
out1.fill(0);
const signed char* kernel0 = (const signed char *)kernel + p * inch * 9;
const signed char* kernel1 = (const signed char *)kernel + (p + 1) * inch * 9;
for (int q=0; q<inch; q++)
{
int* outptr0 = out0;
int* outptr1 = out1;
int* outptr0n = outptr0 + outw;
int* outptr1n = outptr1 + outw;
const signed char* img0 = bottom_blob.channel(q);
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w*2;
const signed char* r3 = img0 + w*3;
const signed char* k00 = kernel0;
const signed char* k03 = kernel0 + 3;
const signed char* k06 = kernel0 + 6;
const signed char* k10 = kernel1;
const signed char* k13 = kernel1 + 3;
const signed char* k16 = kernel1 + 6;
int i = 0;
for (; i+1 < outh; i+=2)
{
int nn = outw >> 3;
int remain = outw & 7;
asm volatile(
"vld1.8 {d26-d27}, [%0] \n" // k00 k01 k02 k03 k04 k05 k06 k07 k08
"vld1.8 {d28-d29}, [%1] \n" // k10 k11 k12 k13 k14 k15 k16 k17 k18
: "=r"(kernel0), // %0
"=r"(kernel1) // %1
: "0"(kernel0),
"1"(kernel1)
: "cc", "memory"
);
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%5, #128] \n"
"vld1.32 {d0-d1}, [%5] \n"// r0
"add %5, #8 \n"
"vext.8 d2, d0, d1, #1 \n"
"vext.8 d3, d0, d1, #2 \n"
"vdup.s8 d1, d26[0] \n"
"vdup.s8 d30, d26[1] \n"
"vdup.s8 d31, d26[2] \n"
"vmull.s8 q2, d0, d1 \n"// k0
"vmlal.s8 q2, d2, d30 \n"// k1
"vmlal.s8 q2, d3, d31 \n"// k2
"pld [%6, #128] \n"
"vld1.32 {d6-d7}, [%6] \n"// r1
"add %6, #8 \n"
"vext.8 d8, d6, d7, #1 \n"
"vext.8 d9, d6, d7, #2 \n"
"vdup.s8 d1, d26[3] \n"
"vdup.s8 d30, d26[4] \n"
"vdup.s8 d31, d26[5] \n"
"vmlal.s8 q2, d6, d1 \n"// k3
"vmlal.s8 q2, d8, d30 \n"// k4
"vmlal.s8 q2, d9, d31 \n"// k5
"pld [%7, #128] \n"
"vld1.32 {d10-d11}, [%7] \n"// r2
"add %7, #8 \n"
"vext.8 d12, d10, d11, #1 \n"
"vext.8 d13, d10, d11, #2 \n"
"vdup.s8 d1, d26[6] \n"
"vdup.s8 d30, d26[7] \n"
"vdup.s8 d31, d27[0] \n"
"vmlal.s8 q2, d10, d1 \n"// k6
"vmlal.s8 q2, d12, d30 \n"// k7
"vmlal.s8 q2, d13, d31 \n"// k8
"pld [%8, #128] \n"
"vld1.32 {d14-d15}, [%8] \n"// r3
"add %8, #8 \n"
"vext.8 d16, d14, d15, #1 \n"
"vext.8 d17, d14, d15, #2 \n"
"pld [%1, #128] \n"
"vld1.32 {d18-d21}, [%1] \n"// sum0
"vaddw.s16 q9, q9, d4 \n"
"vaddw.s16 q10, q10, d5 \n"
"vst1.32 {d18-d21}, [%1]! \n"
"vdup.s8 d1, d26[0] \n"
"vdup.s8 d30, d26[1] \n"
"vdup.s8 d31, d26[2] \n"
"vmull.s8 q2, d6, d1 \n"// k0
"vmlal.s8 q2, d8, d30 \n"// k1
"vmlal.s8 q2, d9, d31 \n"// k2
"vdup.s8 d1, d26[3] \n"
"vdup.s8 d30, d26[4] \n"
"vdup.s8 d31, d26[5] \n"
"vmlal.s8 q2, d10, d1 \n"// k3
"vmlal.s8 q2, d12, d30 \n"// k4
"vmlal.s8 q2, d13, d31 \n"// k5
"vdup.s8 d1, d26[6] \n"
"vdup.s8 d30, d26[7] \n"
"vdup.s8 d31, d27[0] \n"
"vmlal.s8 q2, d14, d1 \n"// k6
"vmlal.s8 q2, d16, d30 \n"// k7
"vmlal.s8 q2, d17, d31 \n"// k8
"pld [%2, #128] \n"
"vld1.32 {d18-d21}, [%2] \n"// sum0n
"vaddw.s16 q9, q9, d4 \n"
"vaddw.s16 q10, q10, d5 \n"
"vst1.32 {d18-d21}, [%2]! \n"
"vdup.s8 d1, d28[0] \n"
"vdup.s8 d30, d28[1] \n"
"vdup.s8 d31, d28[2] \n"
"vmull.s8 q2, d0, d1 \n"// k0n
"vmlal.s8 q2, d2, d30 \n"// k1n
"vmlal.s8 q2, d3, d31 \n"// k2n
"vdup.s8 d1, d28[3] \n"
"vdup.s8 d30, d28[4] \n"
"vdup.s8 d31, d28[5] \n"
"vmlal.s8 q2, d6, d1 \n"// k3n
"vmlal.s8 q2, d8, d30 \n"// k4n
"vmlal.s8 q2, d9, d31 \n"// k5n
"vdup.s8 d1, d28[6] \n"
"vdup.s8 d30, d28[7] \n"
"vdup.s8 d31, d29[0] \n"
"vmlal.s8 q2, d10, d1 \n"// k6n
"vmlal.s8 q2, d12, d30 \n"// k7n
"vmlal.s8 q2, d13, d31 \n"// k8n
"pld [%3, #128] \n"
"vld1.32 {d18-d21}, [%3] \n"// sum1
"vaddw.s16 q9, q9, d4 \n"
"vaddw.s16 q10, q10, d5 \n"
"vst1.32 {d18-d21}, [%3]! \n"
"vdup.s8 d1, d28[0] \n"
"vdup.s8 d30, d28[1] \n"
"vdup.s8 d31, d28[2] \n"
"vmull.s8 q2, d6, d1 \n"// k0n
"vmlal.s8 q2, d8, d30 \n"// k1n
"vmlal.s8 q2, d9, d31 \n"// k2n
"vdup.s8 d1, d28[3] \n"
"vdup.s8 d30, d28[4] \n"
"vdup.s8 d31, d28[5] \n"
"vmlal.s8 q2, d10, d1 \n"// k3n
"vmlal.s8 q2, d12, d30 \n"// k4n
"vmlal.s8 q2, d13, d31 \n"// k5n
"vdup.s8 d1, d28[6] \n"
"vdup.s8 d30, d28[7] \n"
"vdup.s8 d31, d29[0] \n"
"vmlal.s8 q2, d14, d1 \n"// k6n
"vmlal.s8 q2, d16, d30 \n"// k7n
"vmlal.s8 q2, d17, d31 \n"// k8n
"pld [%4, #128] \n"
"vld1.32 {d18-d21}, [%4] \n"// sum1n
"vaddw.s16 q9, q9, d4 \n"
"vaddw.s16 q10, q10, d5 \n"
"vst1.32 {d18-d21}, [%4]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr0n), // %2
"=r"(outptr1), // %3
"=r"(outptr1n), // %4
"=r"(r0), // %5
"=r"(r1), // %6
"=r"(r2), // %7
"=r"(r3) // %8
: "0"(nn),
"1"(outptr0),
"2"(outptr0n),
"3"(outptr1),
"4"(outptr1n),
"5"(r0),
"6"(r1),
"7"(r2),
"8"(r3)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q15"
);
}
asm volatile(
"pld [%5, #128] \n"
"vld1.32 {d0-d1}, [%5] \n"// r0
"add %5, #6 \n"
"vext.8 d2, d0, d1, #1 \n"
"vext.8 d3, d0, d1, #2 \n"
"vdup.s8 d1, d26[0] \n"
"vdup.s8 d30, d26[1] \n"
"vdup.s8 d31, d26[2] \n"
"vmull.s8 q2, d0, d1 \n"// k0
"vmlal.s8 q2, d2, d30 \n"// k1
"vmlal.s8 q2, d3, d31 \n"// k2
"pld [%6, #128] \n"
"vld1.32 {d6-d7}, [%6] \n"// r1
"add %6, #6 \n"
"vext.8 d8, d6, d7, #1 \n"
"vext.8 d9, d6, d7, #2 \n"
"vdup.s8 d1, d26[3] \n"
"vdup.s8 d30, d26[4] \n"
"vdup.s8 d31, d26[5] \n"
"vmlal.s8 q2, d6, d1 \n"// k3
"vmlal.s8 q2, d8, d30 \n"// k4
"vmlal.s8 q2, d9, d31 \n"// k5
"pld [%7, #128] \n"
"vld1.32 {d10-d11}, [%7] \n"// r2
"add %7, #6 \n"
"vext.8 d12, d10, d11, #1 \n"
"vext.8 d13, d10, d11, #2 \n"
"vdup.s8 d1, d26[6] \n"
"vdup.s8 d30, d26[7] \n"
"vdup.s8 d31, d27[0] \n"
"vmlal.s8 q2, d10, d1 \n"// k6
"vmlal.s8 q2, d12, d30 \n"// k7
"vmlal.s8 q2, d13, d31 \n"// k8
"pld [%8, #128] \n"
"vld1.32 {d14-d15}, [%8] \n"// r3
"add %8, #6 \n"
"vext.8 d16, d14, d15, #1 \n"
"vext.8 d17, d14, d15, #2 \n"
"pld [%1, #128] \n"
"vld1.32 {d18-d20}, [%1] \n"// sum0
"vaddw.s16 q9, q9, d4 \n"
"vaddw.s16 q10, q10, d5 \n"
"vst1.32 {d18-d20}, [%1]! \n"
"vdup.s8 d1, d26[0] \n"
"vdup.s8 d30, d26[1] \n"
"vdup.s8 d31, d26[2] \n"
"vmull.s8 q2, d6, d1 \n"// k0
"vmlal.s8 q2, d8, d30 \n"// k1
"vmlal.s8 q2, d9, d31 \n"// k2
"vdup.s8 d1, d26[3] \n"
"vdup.s8 d30, d26[4] \n"
"vdup.s8 d31, d26[5] \n"
"vmlal.s8 q2, d10, d1 \n"// k3
"vmlal.s8 q2, d12, d30 \n"// k4
"vmlal.s8 q2, d13, d31 \n"// k5
"vdup.s8 d1, d26[6] \n"
"vdup.s8 d30, d26[7] \n"
"vdup.s8 d31, d27[0] \n"
"vmlal.s8 q2, d14, d1 \n"// k6
"vmlal.s8 q2, d16, d30 \n"// k7
"vmlal.s8 q2, d17, d31 \n"// k8
"pld [%2, #128] \n"
"vld1.32 {d18-d20}, [%2] \n"// sum0n
"vaddw.s16 q9, q9, d4 \n"
"vaddw.s16 q10, q10, d5 \n"
"vst1.32 {d18-d20}, [%2]! \n"
"vdup.s8 d1, d28[0] \n"
"vdup.s8 d30, d28[1] \n"
"vdup.s8 d31, d28[2] \n"
"vmull.s8 q2, d0, d1 \n"// k0n
"vmlal.s8 q2, d2, d30 \n"// k1n
"vmlal.s8 q2, d3, d31 \n"// k2n
"vdup.s8 d1, d28[3] \n"
"vdup.s8 d30, d28[4] \n"
"vdup.s8 d31, d28[5] \n"
"vmlal.s8 q2, d6, d1 \n"// k3n
"vmlal.s8 q2, d8, d30 \n"// k4n
"vmlal.s8 q2, d9, d31 \n"// k5n
"vdup.s8 d1, d28[6] \n"
"vdup.s8 d30, d28[7] \n"
"vdup.s8 d31, d29[0] \n"
"vmlal.s8 q2, d10, d1 \n"// k6n
"vmlal.s8 q2, d12, d30 \n"// k7n
"vmlal.s8 q2, d13, d31 \n"// k8n
"pld [%3, #128] \n"
"vld1.32 {d18-d20}, [%3] \n"// sum1
"vaddw.s16 q9, q9, d4 \n"
"vaddw.s16 q10, q10, d5 \n"
"vst1.32 {d18-d20}, [%3]! \n"
"vdup.s8 d1, d28[0] \n"
"vdup.s8 d30, d28[1] \n"
"vdup.s8 d31, d28[2] \n"
"vmull.s8 q2, d6, d1 \n"// k0n
"vmlal.s8 q2, d8, d30 \n"// k1n
"vmlal.s8 q2, d9, d31 \n"// k2n
"vdup.s8 d1, d28[3] \n"
"vdup.s8 d30, d28[4] \n"
"vdup.s8 d31, d28[5] \n"
"vmlal.s8 q2, d10, d1 \n"// k3n
"vmlal.s8 q2, d12, d30 \n"// k4n
"vmlal.s8 q2, d13, d31 \n"// k5n
"vdup.s8 d1, d28[6] \n"
"vdup.s8 d30, d28[7] \n"
"vdup.s8 d31, d29[0] \n"
"vmlal.s8 q2, d14, d1 \n"// k6n
"vmlal.s8 q2, d16, d30 \n"// k7n
"vmlal.s8 q2, d17, d31 \n"// k8n
"pld [%4, #128] \n"
"vld1.32 {d18-d20}, [%4] \n"// sum1n
"vaddw.s16 q9, q9, d4 \n"
"vaddw.s16 q10, q10, d5 \n"
"vst1.32 {d18-d20}, [%4]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr0n), // %2
"=r"(outptr1), // %3
"=r"(outptr1n), // %4
"=r"(r0), // %5
"=r"(r1), // %6
"=r"(r2), // %7
"=r"(r3) // %8
: "0"(nn),
"1"(outptr0),
"2"(outptr0n),
"3"(outptr1),
"4"(outptr1n),
"5"(r0),
"6"(r1),
"7"(r2),
"8"(r3)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q15"
);
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr0 += outw;
outptr1 += outw;
outptr0n += outw;
outptr1n += outw;
}
for (; i < outh; i++)
{
int nn = outw >> 3;
int remain = outw & 7;
asm volatile(
"vld1.8 {d26-d27}, [%0] \n"
"vld1.8 {d28-d29}, [%1] \n"
: "=r"(kernel0), // %0
"=r"(kernel1) // %1
: "0"(kernel0),
"1"(kernel1)
: "cc", "memory"
);
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%3, #128] \n"
"vld1.32 {d0-d1}, [%3] \n"// r0
"add %3, #8 \n"
"vext.8 d2, d0, d1, #1 \n"
"vext.8 d3, d0, d1, #2 \n"
"vdup.s8 d1, d26[0] \n"
"vdup.s8 d30, d26[1] \n"
"vdup.s8 d31, d26[2] \n"
"vmull.s8 q2, d0, d1 \n"// k0
"vmlal.s8 q2, d2, d30 \n"// k1
"vmlal.s8 q2, d3, d31 \n"// k2
"pld [%4, #128] \n"
"vld1.32 {d6-d7}, [%4] \n"// r1
"add %4, #8 \n"
"vext.8 d8, d6, d7, #1 \n"
"vext.8 d9, d6, d7, #2 \n"
"vdup.s8 d1, d26[3] \n"
"vdup.s8 d30, d26[4] \n"
"vdup.s8 d31, d26[5] \n"
"vmlal.s8 q2, d6, d1 \n"// k3
"vmlal.s8 q2, d8, d30 \n"// k4
"vmlal.s8 q2, d9, d31 \n"// k5
"pld [%5, #128] \n"
"vld1.32 {d10-d11}, [%5] \n"// r2
"add %5, #8 \n"
"vext.8 d12, d10, d11, #1 \n"
"vext.8 d13, d10, d11, #2 \n"
"vdup.s8 d1, d26[6] \n"
"vdup.s8 d30, d26[7] \n"
"vdup.s8 d31, d27[0] \n"
"vmlal.s8 q2, d10, d1 \n"// k6
"vmlal.s8 q2, d12, d30 \n"// k7
"vmlal.s8 q2, d13, d31 \n"// k8
"pld [%1, #128] \n"
"vld1.32 {d18-d21}, [%1] \n"// sum0
"vaddw.s16 q9, q9, d4 \n"
"vaddw.s16 q10, q10, d5 \n"
"vst1.32 {d18-d21}, [%1]! \n"
"vdup.s8 d1, d28[0] \n"
"vdup.s8 d7, d28[1] \n"
"vdup.s8 d11, d28[2] \n"
"vmull.s8 q2, d0, d1 \n"// k0n
"vmlal.s8 q2, d2, d7 \n"// k1n
"vmlal.s8 q2, d3, d11 \n"// k2n
"vdup.s8 d1, d28[3] \n"
"vdup.s8 d7, d28[4] \n"
"vdup.s8 d11, d28[5] \n"
"vmlal.s8 q2, d6, d1 \n"// k3n
"vmlal.s8 q2, d8, d7 \n"// k4n
"vmlal.s8 q2, d9, d11 \n"// k5n
"vdup.s8 d1, d28[6] \n"
"vdup.s8 d7, d28[7] \n"
"vdup.s8 d11, d29[0] \n"
"vmlal.s8 q2, d10, d1 \n"// k6n
"vmlal.s8 q2, d12, d7 \n"// k7n
"vmlal.s8 q2, d13, d11 \n"// k8n
"pld [%2, #128] \n"
"vld1.32 {d18-d21}, [%2] \n"// sum1
"vaddw.s16 q9, q9, d4 \n"
"vaddw.s16 q10, q10, d5 \n"
"vst1.32 {d18-d21}, [%2]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
asm volatile(
"pld [%3, #128] \n"
"vld1.32 {d0-d1}, [%3] \n"// r0
"add %3, #6 \n"
"vext.8 d2, d0, d1, #1 \n"
"vext.8 d3, d0, d1, #2 \n"
"vdup.s8 d1, d26[0] \n"
"vdup.s8 d30, d26[1] \n"
"vdup.s8 d31, d26[2] \n"
"vmull.s8 q2, d0, d1 \n"// k0
"vmlal.s8 q2, d2, d30 \n"// k1
"vmlal.s8 q2, d3, d31 \n"// k2
"pld [%4, #128] \n"
"vld1.32 {d6-d7}, [%4] \n"// r1
"add %4, #6 \n"
"vext.8 d8, d6, d7, #1 \n"
"vext.8 d9, d6, d7, #2 \n"
"vdup.s8 d1, d26[3] \n"
"vdup.s8 d30, d26[4] \n"
"vdup.s8 d31, d26[5] \n"
"vmlal.s8 q2, d6, d1 \n"// k3
"vmlal.s8 q2, d8, d30 \n"// k4
"vmlal.s8 q2, d9, d31 \n"// k5
"pld [%5, #128] \n"
"vld1.32 {d10-d11}, [%5] \n"// r2
"add %5, #6 \n"
"vext.8 d12, d10, d11, #1 \n"
"vext.8 d13, d10, d11, #2 \n"
"vdup.s8 d1, d26[6] \n"
"vdup.s8 d30, d26[7] \n"
"vdup.s8 d31, d27[0] \n"
"vmlal.s8 q2, d10, d1 \n"// k6
"vmlal.s8 q2, d12, d30 \n"// k7
"vmlal.s8 q2, d13, d31 \n"// k8
"pld [%1, #128] \n"
"vld1.32 {d18-d20}, [%1] \n"// sum0
"vaddw.s16 q9, q9, d4 \n"
"vaddw.s16 q10, q10, d5 \n"
"vst1.32 {d18-d20}, [%1]! \n"
"vdup.s8 d1, d28[0] \n"
"vdup.s8 d7, d28[1] \n"
"vdup.s8 d11, d28[2] \n"
"vmull.s8 q2, d0, d1 \n"// k0n
"vmlal.s8 q2, d2, d7 \n"// k1n
"vmlal.s8 q2, d3, d11 \n"// k2n
"vdup.s8 d1, d28[3] \n"
"vdup.s8 d7, d28[4] \n"
"vdup.s8 d11, d28[5] \n"
"vmlal.s8 q2, d6, d1 \n"// k3n
"vmlal.s8 q2, d8, d7 \n"// k4n
"vmlal.s8 q2, d9, d11 \n"// k5n
"vdup.s8 d1, d28[6] \n"
"vdup.s8 d7, d28[7] \n"
"vdup.s8 d11, d29[0] \n"
"vmlal.s8 q2, d10, d1 \n"// k6n
"vmlal.s8 q2, d12, d7 \n"// k7n
"vmlal.s8 q2, d13, d11 \n"// k8n
"pld [%2, #128] \n"
"vld1.32 {d18-d20}, [%2] \n"// sum1
"vaddw.s16 q9, q9, d4 \n"
"vaddw.s16 q10, q10, d5 \n"
"vst1.32 {d18-d20}, [%2]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
r0 += 2;
r1 += 2;
r2 += 2;
}
kernel0 += 9;
kernel1 += 9;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
out0.fill(0);
const signed char* kernel0 = (const signed char *)kernel + p * inch * 9;
for (int q=0; q<inch; q++)
{
int* outptr0 = out0;
int* outptr0n = outptr0 + outw;
const signed char* img0 = bottom_blob.channel(q);
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w * 2;
const signed char* r3 = img0 + w * 3;
const signed char* k00 = kernel0;
const signed char* k03 = kernel0 + 3;
const signed char* k06 = kernel0 + 6;
int i = 0;
for (; i+1 < outh; i+=2)
{
int nn = outw >> 3;
int remain = outw & 7;
asm volatile(
"vld1.8 {d26-d27}, [%0] \n"
: "=r"(kernel0) // %0
: "0"(kernel0)
: "cc", "memory"
);
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%3, #128] \n"
"vld1.32 {d0-d1}, [%3] \n"// r0
"add %3, #8 \n"
"vext.8 d2, d0, d1, #1 \n"
"vext.8 d3, d0, d1, #2 \n"
"vdup.s8 d1, d26[0] \n"
"vdup.s8 d30, d26[1] \n"
"vdup.s8 d31, d26[2] \n"
"vmull.s8 q2, d0, d1 \n"// k0
"vmlal.s8 q2, d2, d30 \n"// k1
"vmlal.s8 q2, d3, d31 \n"// k2
"pld [%4, #128] \n"
"vld1.32 {d6-d7}, [%4] \n"// r1
"add %4, #8 \n"
"vext.8 d8, d6, d7, #1 \n"
"vext.8 d9, d6, d7, #2 \n"
"vdup.s8 d1, d26[3] \n"
"vdup.s8 d30, d26[4] \n"
"vdup.s8 d31, d26[5] \n"
"vmlal.s8 q2, d6, d1 \n"// k3
"vmlal.s8 q2, d8, d30 \n"// k4
"vmlal.s8 q2, d9, d31 \n"// k5
"pld [%5, #128] \n"
"vld1.32 {d10-d11}, [%5] \n"// r2
"add %5, #8 \n"
"vext.8 d12, d10, d11, #1 \n"
"vext.8 d13, d10, d11, #2 \n"
"vdup.s8 d1, d26[6] \n"
"vdup.s8 d30, d26[7] \n"
"vdup.s8 d31, d27[0] \n"
"vmlal.s8 q2, d10, d1 \n"// k6
"vmlal.s8 q2, d12, d30 \n"// k7
"vmlal.s8 q2, d13, d31 \n"// k8
"pld [%6, #128] \n"
"vld1.32 {d14-d15}, [%6] \n"// r3
"add %6, #8 \n"
"vext.8 d16, d14, d15, #1 \n"
"vext.8 d17, d14, d15, #2 \n"
"pld [%1, #128] \n"
"vld1.32 {d18-d21}, [%1] \n"// sum0
"vaddw.s16 q9, q9, d4 \n"
"vaddw.s16 q10, q10, d5 \n"
"vst1.32 {d18-d21}, [%1]! \n"
"vdup.s8 d1, d26[0] \n"
"vdup.s8 d30, d26[1] \n"
"vdup.s8 d31, d26[2] \n"
"vmull.s8 q2, d6, d1 \n"// k0
"vmlal.s8 q2, d8, d30 \n"// k1
"vmlal.s8 q2, d9, d31 \n"// k2
"vdup.s8 d1, d26[3] \n"
"vdup.s8 d30, d26[4] \n"
"vdup.s8 d31, d26[5] \n"
"vmlal.s8 q2, d10, d1 \n"// k3
"vmlal.s8 q2, d12, d30 \n"// k4
"vmlal.s8 q2, d13, d31 \n"// k5
"vdup.s8 d1, d26[6] \n"
"vdup.s8 d30, d26[7] \n"
"vdup.s8 d31, d27[0] \n"
"vmlal.s8 q2, d14, d1 \n"// k6
"vmlal.s8 q2, d16, d30 \n"// k7
"vmlal.s8 q2, d17, d31 \n"// k8
"pld [%2, #128] \n"
"vld1.32 {d18-d21}, [%2] \n"// sum0n
"vaddw.s16 q9, q9, d4 \n"
"vaddw.s16 q10, q10, d5 \n"
"vst1.32 {d18-d21}, [%2]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr0n), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr0n),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
asm volatile(
"pld [%3, #128] \n"
"vld1.32 {d0-d1}, [%3] \n"// r0
"add %3, #6 \n"
"vext.8 d2, d0, d1, #1 \n"
"vext.8 d3, d0, d1, #2 \n"
"vdup.s8 d1, d26[0] \n"
"vdup.s8 d30, d26[1] \n"
"vdup.s8 d31, d26[2] \n"
"vmull.s8 q2, d0, d1 \n"// k0
"vmlal.s8 q2, d2, d30 \n"// k1
"vmlal.s8 q2, d3, d31 \n"// k2
"pld [%4, #128] \n"
"vld1.32 {d6-d7}, [%4] \n"// r1
"add %4, #6 \n"
"vext.8 d8, d6, d7, #1 \n"
"vext.8 d9, d6, d7, #2 \n"
"vdup.s8 d1, d26[3] \n"
"vdup.s8 d30, d26[4] \n"
"vdup.s8 d31, d26[5] \n"
"vmlal.s8 q2, d6, d1 \n"// k3
"vmlal.s8 q2, d8, d30 \n"// k4
"vmlal.s8 q2, d9, d31 \n"// k5
"pld [%5, #128] \n"
"vld1.32 {d10-d11}, [%5] \n"// r2
"add %5, #6 \n"
"vext.8 d12, d10, d11, #1 \n"
"vext.8 d13, d10, d11, #2 \n"
"vdup.s8 d1, d26[6] \n"
"vdup.s8 d30, d26[7] \n"
"vdup.s8 d31, d27[0] \n"
"vmlal.s8 q2, d10, d1 \n"// k6
"vmlal.s8 q2, d12, d30 \n"// k7
"vmlal.s8 q2, d13, d31 \n"// k8
"pld [%6, #128] \n"
"vld1.32 {d14-d15}, [%6] \n"// r3
"add %6, #6 \n"
"vext.8 d16, d14, d15, #1 \n"
"vext.8 d17, d14, d15, #2 \n"
"pld [%1, #128] \n"
"vld1.32 {d18-d20}, [%1] \n"// sum0
"vaddw.s16 q9, q9, d4 \n"
"vaddw.s16 q10, q10, d5 \n"
"vst1.32 {d18-d20}, [%1]! \n"
"vdup.s8 d1, d26[0] \n"
"vdup.s8 d30, d26[1] \n"
"vdup.s8 d31, d26[2] \n"
"vmull.s8 q2, d6, d1 \n"// k0
"vmlal.s8 q2, d8, d30 \n"// k1
"vmlal.s8 q2, d9, d31 \n"// k2
"vdup.s8 d1, d26[3] \n"
"vdup.s8 d30, d26[4] \n"
"vdup.s8 d31, d26[5] \n"
"vmlal.s8 q2, d10, d1 \n"// k3
"vmlal.s8 q2, d12, d30 \n"// k4
"vmlal.s8 q2, d13, d31 \n"// k5
"vdup.s8 d1, d26[6] \n"
"vdup.s8 d30, d26[7] \n"
"vdup.s8 d31, d27[0] \n"
"vmlal.s8 q2, d14, d1 \n"// k6
"vmlal.s8 q2, d16, d30 \n"// k7
"vmlal.s8 q2, d17, d31 \n"// k8
"pld [%2, #128] \n"
"vld1.32 {d18-d20}, [%2] \n"// sum0n
"vaddw.s16 q9, q9, d4 \n"
"vaddw.s16 q10, q10, d5 \n"
"vst1.32 {d18-d20}, [%2]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr0n), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr0n),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr0 += outw;
outptr0n += outw;
}
for (; i < outh; i++)
{
int nn = outw >> 3;
int remain = outw & 7;
asm volatile(
"vld1.8 {d26-d27}, [%0] \n"
: "=r"(kernel0) // %0
: "0"(kernel0)
: "cc", "memory"
);
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%2, #128] \n"
"vld1.32 {d0-d1}, [%2] \n"// r0
"add %2, #8 \n"
"vext.8 d2, d0, d1, #1 \n"
"vext.8 d3, d0, d1, #2 \n"
"vdup.s8 d1, d26[0] \n"
"vdup.s8 d30, d26[1] \n"
"vdup.s8 d31, d26[2] \n"
"vmull.s8 q2, d0, d1 \n"// k0
"vmlal.s8 q2, d2, d30 \n"// k1
"vmlal.s8 q2, d3, d31 \n"// k2
"pld [%3, #128] \n"
"vld1.32 {d6-d7}, [%3] \n"// r1
"add %3, #8 \n"
"vext.8 d8, d6, d7, #1 \n"
"vext.8 d9, d6, d7, #2 \n"
"vdup.s8 d1, d26[3] \n"
"vdup.s8 d30, d26[4] \n"
"vdup.s8 d31, d26[5] \n"
"vmlal.s8 q2, d6, d1 \n"// k3
"vmlal.s8 q2, d8, d30 \n"// k4
"vmlal.s8 q2, d9, d31 \n"// k5
"pld [%4, #128] \n"
"vld1.32 {d10-d11}, [%4] \n"// r2
"add %4, #8 \n"
"vext.8 d12, d10, d11, #1 \n"
"vext.8 d13, d10, d11, #2 \n"
"vdup.s8 d1, d26[6] \n"
"vdup.s8 d30, d26[7] \n"
"vdup.s8 d31, d27[0] \n"
"vmlal.s8 q2, d10, d1 \n"// k6
"vmlal.s8 q2, d12, d30 \n"// k7
"vmlal.s8 q2, d13, d31 \n"// k8
"pld [%1, #128] \n"
"vld1.32 {d18-d21}, [%1] \n"// sum0
"vaddw.s16 q9, q9, d4 \n"
"vaddw.s16 q10, q10, d5 \n"
"vst1.32 {d18-d21}, [%1]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
asm volatile(
"pld [%2, #128] \n"
"vld1.32 {d0-d1}, [%2] \n"// r0
"add %2, #6 \n"
"vext.8 d2, d0, d1, #1 \n"
"vext.8 d3, d0, d1, #2 \n"
"vdup.s8 d1, d26[0] \n"
"vdup.s8 d30, d26[1] \n"
"vdup.s8 d31, d26[2] \n"
"vmull.s8 q2, d0, d1 \n"// k0
"vmlal.s8 q2, d2, d30 \n"// k1
"vmlal.s8 q2, d3, d31 \n"// k2
"pld [%3, #128] \n"
"vld1.32 {d6-d7}, [%3] \n"// r1
"add %3, #6 \n"
"vext.8 d8, d6, d7, #1 \n"
"vext.8 d9, d6, d7, #2 \n"
"vdup.s8 d1, d26[3] \n"
"vdup.s8 d30, d26[4] \n"
"vdup.s8 d31, d26[5] \n"
"vmlal.s8 q2, d6, d1 \n"// k3
"vmlal.s8 q2, d8, d30 \n"// k4
"vmlal.s8 q2, d9, d31 \n"// k5
"pld [%4, #128] \n"
"vld1.32 {d10-d11}, [%4] \n"// r2
"add %4, #6 \n"
"vext.8 d12, d10, d11, #1 \n"
"vext.8 d13, d10, d11, #2 \n"
"vdup.s8 d1, d26[6] \n"
"vdup.s8 d30, d26[7] \n"
"vdup.s8 d31, d27[0] \n"
"vmlal.s8 q2, d10, d1 \n"// k6
"vmlal.s8 q2, d12, d30 \n"// k7
"vmlal.s8 q2, d13, d31 \n"// k8
"pld [%1, #128] \n"
"vld1.32 {d18-d20}, [%1] \n"// sum0
"vaddw.s16 q9, q9, d4 \n"
"vaddw.s16 q10, q10, d5 \n"
"vst1.32 {d18-d20}, [%1]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
r0 += 2;
r1 += 2;
r2 += 2;
}
kernel0 += 9;
}
}
}
// conv3x3s2_neon_s8: 3x3 convolution, stride 2, on signed 8-bit input,
// accumulating into 32-bit integer output channels, using ARMv7 NEON
// inline assembly for the vectorized main loop.
//
// bottom_blob : input feature maps, signed char elements, inch channels
// top_blob    : output feature maps, int elements, outch channels (accumulated in place)
// _kernel     : flat array of 3x3 signed char kernels, laid out [outch][inch][9]
// opt         : provides num_threads for the OpenMP channel-parallel loops
static void conv3x3s2_neon_s8(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// Row advance after finishing one output row: the stride-2 column walk has
// consumed 2*outw input pixels of the current row, and the window then moves
// down by two input rows, hence w - 2*outw + w.
const int tailstep = w - 2 * outw + w;
const signed char* kernel = _kernel;
// Process output channels two at a time; the leftover channels (at most one
// here) are handled by the second loop starting at remain_outch_start.
int nn_outch = outch >> 1;
int remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp < nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p + 1);
// NOTE(review): outputs are accumulated as int32 (outptr0/outptr1 are int*),
// but fill(0.f) writes float zero here; its bit pattern is also integer 0 so
// the result is unchanged, yet the s1 path uses fill(0) — confirm and unify.
out0.fill(0.f);
out1.fill(0.f);
// Two 3x3 kernel sets for this channel pair, 9 weights per input channel.
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9;
const signed char* kernel1 = (const signed char*)kernel + (p + 1) * inch * 9;
// Accumulate the contribution of every input channel into out0/out1.
for (int q=0; q<inch; q++)
{
int* outptr0 = out0;
int* outptr1 = out1;
const signed char* img0 = bottom_blob.channel(q);
// Three consecutive input rows feeding one output row (3x3 window).
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w * 2;
// Row pointers into the two kernels; kept for reference, the asm below
// reloads the 9 weights directly from kernel0/kernel1 instead.
const signed char* k00 = kernel0;
const signed char* k01 = kernel0 + 3;
const signed char* k02 = kernel0 + 6;
const signed char* k10 = kernel1;
const signed char* k11 = kernel1 + 3;
const signed char* k12 = kernel1 + 6;
int i = 0;
for (; i < outh; i++)
{
// nn = number of 8-output-pixel NEON iterations, remain = scalar tail.
int nn = outw >> 3;
int remain = outw & 7;
// Preload both 9-byte kernels into d22-d23 (kernel0) and d24-d25 (kernel1).
asm volatile(
"vld1.s8 {d22-d23}, [%0] \n"
"vld1.s8 {d24-d25}, [%1] \n"
: "=r"(kernel0), // %0
"=r"(kernel1) // %1
: "0"(kernel0),
"1"(kernel1)
: "cc", "memory"
);
// Main loop: 8 output pixels per iteration. vld2 de-interleaves the
// stride-2 input into even/odd lanes; both output channels are updated.
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%3, #192] \n"
"vld2.s8 {d0-d1}, [%3]! \n" // r0
"vld2.s8 {d2-d3}, [%3] \n"
"vext.8 d3, d0, d2, #1 \n"
"vdup.s8 d26, d22[0] \n"
"vdup.s8 d27, d22[1] \n"
"vdup.s8 d28, d22[2] \n"
"vmull.s8 q2, d0, d26 \n" // k00
"vmlal.s8 q2, d1, d27 \n" // k01
"vmlal.s8 q2, d3, d28 \n" // k02
"pld [%4, #192] \n"
"vld2.s8 {d6-d7}, [%4]! \n" // r1
"vld2.s8 {d8-d9}, [%4] \n"
"vext.8 d9, d6, d8, #1 \n"
"vdup.s8 d26, d22[3] \n"
"vdup.s8 d27, d22[4] \n"
"vdup.s8 d28, d22[5] \n"
"vmlal.s8 q2, d6, d26 \n" // k03
"vmlal.s8 q2, d7, d27 \n" // k04
"vmlal.s8 q2, d9, d28 \n" // k05
"pld [%5, #192] \n"
"vld2.s8 {d10-d11}, [%5]! \n" // r2
"vld2.s8 {d12-d13}, [%5] \n"
"vext.8 d13, d10, d12, #1 \n"
"vdup.s8 d26, d22[6] \n"
"vdup.s8 d27, d22[7] \n"
"vdup.s8 d28, d23[0] \n"
"vmlal.s8 q2, d10, d26 \n" // k06
"vmlal.s8 q2, d11, d27 \n" // k07
"vmlal.s8 q2, d13, d28 \n" // k08
"pld [%1, #256] \n"
"vld1.32 {d14-d17}, [%1] \n" //sum0
"vaddw.s16 q7, q7, d4 \n"
"vaddw.s16 q8, q8, d5 \n"
"vst1.32 {d14-d17}, [%1]! \n"
"vdup.s8 d26, d24[0] \n"
"vdup.s8 d27, d24[1] \n"
"vdup.s8 d28, d24[2] \n"
"vmull.s8 q2, d0, d26 \n" // k00
"vmlal.s8 q2, d1, d27 \n" // k01
"vmlal.s8 q2, d3, d28 \n" // k02
"vdup.s8 d26, d24[3] \n"
"vdup.s8 d27, d24[4] \n"
"vdup.s8 d28, d24[5] \n"
"vmlal.s8 q2, d6, d26 \n" // k03
"vmlal.s8 q2, d7, d27 \n" // k04
"vmlal.s8 q2, d9, d28 \n" // k05
"vdup.s8 d26, d24[6] \n"
"vdup.s8 d27, d24[7] \n"
"vdup.s8 d28, d25[0] \n"
"vmlal.s8 q2, d10, d26 \n" // k06
"vmlal.s8 q2, d11, d27 \n" // k07
"vmlal.s8 q2, d13, d28 \n" // k08
"pld [%2, #256] \n"
"vld1.32 {d14-d17}, [%2] \n" //sum1
"vaddw.s16 q7, q7, d4 \n"
"vaddw.s16 q8, q8, d5 \n"
"vst1.32 {d14-d17}, [%2]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q13", "q14", "q15"
);
}
// Vector tail: handle 4 more output pixels with NEON if possible.
// The loads advance 16 bytes, so the row pointers are rewound by 8
// (sub %N, #8) to leave them 8 input pixels (4 outputs) further on.
if (remain >= 4)
{
remain -= 4;
asm volatile(
"pld [%3, #192] \n"
"vld2.s8 {d0-d1}, [%3]! \n" // r0
"vld2.s8 {d2-d3}, [%3] \n"
"vext.8 d3, d0, d2, #1 \n"
"vdup.s8 d26, d22[0] \n"
"vdup.s8 d27, d22[1] \n"
"vdup.s8 d28, d22[2] \n"
"vmull.s8 q2, d0, d26 \n" // k00
"vmlal.s8 q2, d1, d27 \n" // k01
"vmlal.s8 q2, d3, d28 \n" // k02
"pld [%4, #192] \n"
"vld2.s8 {d6-d7}, [%4]! \n" // r1
"vld2.s8 {d8-d9}, [%4] \n"
"vext.8 d9, d6, d8, #1 \n"
"vdup.s8 d26, d22[3] \n"
"vdup.s8 d27, d22[4] \n"
"vdup.s8 d28, d22[5] \n"
"vmlal.s8 q2, d6, d26 \n" // k03
"vmlal.s8 q2, d7, d27 \n" // k04
"vmlal.s8 q2, d9, d28 \n" // k05
"pld [%5, #192] \n"
"vld2.s8 {d10-d11}, [%5]! \n" // r2
"vld2.s8 {d12-d13}, [%5] \n"
"vext.8 d13, d10, d12, #1 \n"
"sub %3, #8 \n"
"sub %4, #8 \n"
"sub %5, #8 \n"
"vdup.s8 d26, d22[6] \n"
"vdup.s8 d27, d22[7] \n"
"vdup.s8 d28, d23[0] \n"
"vmlal.s8 q2, d10, d26 \n" // k06
"vmlal.s8 q2, d11, d27 \n" // k07
"vmlal.s8 q2, d13, d28 \n" // k08
"pld [%1, #128] \n"
"vld1.32 {d14-d15}, [%1] \n" //sum0
"vaddw.s16 q7, q7, d4 \n"
"vst1.32 {d14-d15}, [%1]! \n"
"vdup.s8 d26, d24[0] \n"
"vdup.s8 d27, d24[1] \n"
"vdup.s8 d28, d24[2] \n"
"vmull.s8 q2, d0, d26 \n" // k00
"vmlal.s8 q2, d1, d27 \n" // k01
"vmlal.s8 q2, d3, d28 \n" // k02
"vdup.s8 d26, d24[3] \n"
"vdup.s8 d27, d24[4] \n"
"vdup.s8 d28, d24[5] \n"
"vmlal.s8 q2, d6, d26 \n" // k03
"vmlal.s8 q2, d7, d27 \n" // k04
"vmlal.s8 q2, d9, d28 \n" // k05
"vdup.s8 d26, d24[6] \n"
"vdup.s8 d27, d24[7] \n"
"vdup.s8 d28, d25[0] \n"
"vmlal.s8 q2, d10, d26 \n" // k06
"vmlal.s8 q2, d11, d27 \n" // k07
"vmlal.s8 q2, d13, d28 \n" // k08
"pld [%2, #128] \n"
"vld1.32 {d14-d15}, [%2] \n" //sum1
"vaddw.s16 q7, q7, d4 \n"
"vst1.32 {d14-d15}, [%2]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q13", "q14", "q15"
);
}
// Scalar tail: plain C 3x3 dot products for the remaining <4 pixels,
// stride 2 (row pointers advance by 2 per output pixel).
for (; remain>0; remain--)
{
int sum0 = 0;
int sum1 = 0;
sum0 += (int)r0[0] * kernel0[0];
sum0 += (int)r0[1] * kernel0[1];
sum0 += (int)r0[2] * kernel0[2];
sum0 += (int)r1[0] * kernel0[3];
sum0 += (int)r1[1] * kernel0[4];
sum0 += (int)r1[2] * kernel0[5];
sum0 += (int)r2[0] * kernel0[6];
sum0 += (int)r2[1] * kernel0[7];
sum0 += (int)r2[2] * kernel0[8];
sum1 += (int)r0[0] * kernel1[0];
sum1 += (int)r0[1] * kernel1[1];
sum1 += (int)r0[2] * kernel1[2];
sum1 += (int)r1[0] * kernel1[3];
sum1 += (int)r1[1] * kernel1[4];
sum1 += (int)r1[2] * kernel1[5];
sum1 += (int)r2[0] * kernel1[6];
sum1 += (int)r2[1] * kernel1[7];
sum1 += (int)r2[2] * kernel1[8];
*outptr0 += sum0;
*outptr1 += sum1;
r0 += 2;
r1 += 2;
r2 += 2;
outptr0++;
outptr1++;
}
// Step the three row pointers down to the next stride-2 row window.
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
// Next input channel's 3x3 weights.
kernel0 += 9;
kernel1 += 9;
}
}
// Leftover output channels (outch odd): same algorithm, one channel at a time.
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
// NOTE(review): same fill(0.f)-on-int32-output pattern as above — harmless
// bitwise, but fill(0) would match the accumulation type.
out0.fill(0.f);
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9;
for (int q=0; q<inch; q++)
{
int* outptr0 = out0;
const signed char* img0 = bottom_blob.channel(q);
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w * 2;
// Kernel row pointers; unused by the asm, which reads kernel0 directly.
const signed char* k00 = kernel0;
const signed char* k01 = kernel0 + 3;
const signed char* k02 = kernel0 + 6;
int i = 0;
for (; i < outh; i++)
{
int nn = outw >> 3;
int remain = outw & 7;
// Preload the 9 kernel weights into d22-d23.
asm volatile(
"vld1.s8 {d22-d23}, [%0] \n"
: "=r"(kernel0) // %0
: "0"(kernel0)
: "cc", "memory"
);
// Main loop: 8 stride-2 output pixels per iteration, single channel.
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%2, #192] \n"
"vld2.s8 {d0-d1}, [%2]! \n" // r0
"vld2.s8 {d2-d3}, [%2] \n"
"vext.8 d3, d0, d2, #1 \n"
"vdup.s8 d26, d22[0] \n"
"vdup.s8 d27, d22[1] \n"
"vdup.s8 d28, d22[2] \n"
"vmull.s8 q2, d0, d26 \n" // k00
"vmlal.s8 q2, d1, d27 \n" // k01
"vmlal.s8 q2, d3, d28 \n" // k02
"pld [%3, #192] \n"
"vld2.s8 {d6-d7}, [%3]! \n" // r1
"vld2.s8 {d8-d9}, [%3] \n"
"vext.8 d9, d6, d8, #1 \n"
"vdup.s8 d26, d22[3] \n"
"vdup.s8 d27, d22[4] \n"
"vdup.s8 d28, d22[5] \n"
"vmlal.s8 q2, d6, d26 \n" // k03
"vmlal.s8 q2, d7, d27 \n" // k04
"vmlal.s8 q2, d9, d28 \n" // k05
"pld [%4, #192] \n"
"vld2.s8 {d10-d11}, [%4]! \n" // r2
"vld2.s8 {d12-d13}, [%4] \n"
"vext.8 d13, d10, d12, #1 \n"
"vdup.s8 d26, d22[6] \n"
"vdup.s8 d27, d22[7] \n"
"vdup.s8 d28, d23[0] \n"
"vmlal.s8 q2, d10, d26 \n" // k06
"vmlal.s8 q2, d11, d27 \n" // k07
"vmlal.s8 q2, d13, d28 \n" // k08
"pld [%1, #256] \n"
"vld1.32 {d14-d17}, [%1] \n" //sum0
"vaddw.s16 q7, q7, d4 \n"
"vaddw.s16 q8, q8, d5 \n"
"vst1.32 {d14-d17}, [%1]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q12", "q13", "q14"
);
}
// Vector tail: 4 output pixels; pointers rewound by 8 after the
// 16-byte loads, leaving a net advance of 8 input pixels.
if (remain >= 4)
{
remain -= 4;
asm volatile(
"pld [%2, #192] \n"
"vld2.s8 {d0-d1}, [%2]! \n" // r0
"vld2.s8 {d2-d3}, [%2] \n"
"vext.8 d3, d0, d2, #1 \n"
"vdup.s8 d26, d22[0] \n"
"vdup.s8 d27, d22[1] \n"
"vdup.s8 d28, d22[2] \n"
"vmull.s8 q2, d0, d26 \n" // k00
"vmlal.s8 q2, d1, d27 \n" // k01
"vmlal.s8 q2, d3, d28 \n" // k02
"pld [%3, #192] \n"
"vld2.s8 {d6-d7}, [%3]! \n" // r1
"vld2.s8 {d8-d9}, [%3] \n"
"vext.8 d9, d6, d8, #1 \n"
"vdup.s8 d26, d22[3] \n"
"vdup.s8 d27, d22[4] \n"
"vdup.s8 d28, d22[5] \n"
"vmlal.s8 q2, d6, d26 \n" // k03
"vmlal.s8 q2, d7, d27 \n" // k04
"vmlal.s8 q2, d9, d28 \n" // k05
"pld [%4, #192] \n"
"vld2.s8 {d10-d11}, [%4]! \n" // r2
"vld2.s8 {d12-d13}, [%4] \n"
"vext.8 d13, d10, d12, #1 \n"
"sub %2, #8 \n"
"sub %3, #8 \n"
"sub %4, #8 \n"
"vdup.s8 d26, d22[6] \n"
"vdup.s8 d27, d22[7] \n"
"vdup.s8 d28, d23[0] \n"
"vmlal.s8 q2, d10, d26 \n" // k06
"vmlal.s8 q2, d11, d27 \n" // k07
"vmlal.s8 q2, d13, d28 \n" // k08
"pld [%1, #128] \n"
"vld1.32 {d14-d15}, [%1] \n" //sum0
"vaddw.s16 q7, q7, d4 \n"
"vst1.32 {d14-d15}, [%1]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q12", "q13", "q14"
);
}
// Scalar tail for the remaining <4 output pixels.
for (; remain>0; remain--)
{
int sum0 = 0;
sum0 += (int)r0[0] * kernel0[0];
sum0 += (int)r0[1] * kernel0[1];
sum0 += (int)r0[2] * kernel0[2];
sum0 += (int)r1[0] * kernel0[3];
sum0 += (int)r1[1] * kernel0[4];
sum0 += (int)r1[2] * kernel0[5];
sum0 += (int)r2[0] * kernel0[6];
sum0 += (int)r2[1] * kernel0[7];
sum0 += (int)r2[2] * kernel0[8];
*outptr0 += sum0;
r0 += 2;
r1 += 2;
r2 += 2;
outptr0++;
}
// Advance to the next stride-2 input row window.
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
kernel0 += 9;
}
}
}
// conv3x3s1_int8_neon: front-end dispatcher for the 3x3 stride-1 int8
// convolution. The vectorized kernels process 8 output columns per
// iteration; specialized variants exist for tails of exactly 4 or 6
// leftover columns, and the generic kernel covers every other tail width.
static void conv3x3s1_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
// Tail width = output width modulo 8.
const int tail = top_blob.w & 7;
switch (tail)
{
case 4:
// Dedicated 4-column-tail kernel.
conv3x3s1_neon_s8_left4(bottom_blob, top_blob, _kernel, opt);
break;
case 6:
// Dedicated 6-column-tail kernel.
conv3x3s1_neon_s8_left6(bottom_blob, top_blob, _kernel, opt);
break;
default:
// All other tail widths (0-3, 5, 7) use the generic kernel.
conv3x3s1_neon_s8(bottom_blob, top_blob, _kernel, opt);
break;
}
}
static void conv3x3s1_packed_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
int w = bottom_blob.w;
//int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp < nn_outch; pp++)
{
int p = pp * 4;
Mat out0 = top_blob.channel(p+0);
Mat out1 = top_blob.channel(p+1);
Mat out2 = top_blob.channel(p+2);
Mat out3 = top_blob.channel(p+3);
out0.fill(0);
out1.fill(0);
out2.fill(0);
out3.fill(0);
const signed char* ktmp = _kernel.channel(p/4);
for (int q = 0; q < inch; q++)
{
int* outptr0 = out0;
int* outptr1 = out1;
int* outptr2 = out2;
int* outptr3 = out3;
const signed char *img0 = bottom_blob.channel(q);
const signed char *r0 = img0;
const signed char *r1 = img0 + w;
const signed char *r2 = img0 + w * 2;
const signed char *r3 = img0 + w * 3;
int i = 0;
for (; i+1 < outh; i+=2)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
if (nn > 0)
{
asm volatile(
"0: \n"
// "pld [%9, #256] \n"
"vld1.s8 {d0-d3}, [%9]! \n"// d0=k00 k01 d1=k02 k10 d2=k11 k12 d3=k20 k21
"pld [%5, #128] \n"
"vld1.s8 {d4-d5}, [%5] \n"// d4=r00 d5=r00n
"add %5, #8 \n"
"vdup.s8 d8, d0[0] \n"
"vdup.s8 d9, d0[1] \n"
"pld [%6, #128] \n"
"vld1.s8 {d6-d7}, [%6] \n"// d6=r10 d7=r10n
"add %6, #8 \n"
"vdup.s8 d10, d0[2] \n"
"vdup.s8 d11, d0[3] \n"
"vmull.s8 q8, d4, d8 \n"
"vmull.s8 q9, d4, d9 \n"
"vdup.s8 d12, d0[4] \n"
"vdup.s8 d13, d0[5] \n"
"vmull.s8 q10, d4, d10 \n"
"vmull.s8 q11, d4, d11 \n"
"vdup.s8 d14, d0[6] \n"
"vdup.s8 d15, d0[7] \n"
"vmull.s8 q12, d6, d8 \n"
"vmull.s8 q13, d6, d9 \n"
"vext.s8 q2, q2, q2, #1 \n"// d4=r01
"vmull.s8 q14, d6, d10 \n"
"vmull.s8 q15, d6, d11 \n"
"vext.s8 q3, q3, q3, #1 \n"// d6=r11
"vmlal.s8 q8, d4, d12 \n"
"vmlal.s8 q9, d4, d13 \n"
"vdup.s8 d8, d1[0] \n"
"vdup.s8 d9, d1[1] \n"
"vmlal.s8 q10, d4, d14 \n"
"vmlal.s8 q11, d4, d15 \n"
"vdup.s8 d10, d1[2] \n"
"vdup.s8 d11, d1[3] \n"
"vmlal.s8 q12, d6, d12 \n"
"vmlal.s8 q13, d6, d13 \n"
"vext.s8 q2, q2, q2, #1 \n"// d4=r02
"vmlal.s8 q14, d6, d14 \n"
"vmlal.s8 q15, d6, d15 \n"
"vext.s8 q3, q3, q3, #1 \n"// d6=r12
"vmlal.s8 q8, d4, d8 \n"
"vmlal.s8 q9, d4, d9 \n"
"vdup.s8 d12, d1[4] \n"
"vdup.s8 d13, d1[5] \n"
"vmlal.s8 q10, d4, d10 \n"
"vmlal.s8 q11, d4, d11 \n"
"vdup.s8 d14, d1[6] \n"
"vdup.s8 d15, d1[7] \n"
"vmlal.s8 q12, d6, d8 \n"
"vmlal.s8 q13, d6, d9 \n"
"pld [%7, #128] \n"
"vld1.s8 {d4-d5}, [%7] \n"// d4=r20 d5=r20n
"add %7, #8 \n"
"vmlal.s8 q14, d6, d10 \n"
"vmlal.s8 q15, d6, d11 \n"
///
"vext.s8 q3, q3, q3, #14 \n"// d6=r10
"vmlal.s8 q8, d6, d12 \n"
"vmlal.s8 q9, d6, d13 \n"
"vdup.s8 d8, d2[0] \n"
"vdup.s8 d9, d2[1] \n"
"vmlal.s8 q10, d6, d14 \n"
"vmlal.s8 q11, d6, d15 \n"
"vdup.s8 d10, d2[2] \n"
"vdup.s8 d11, d2[3] \n"
"vmlal.s8 q12, d4, d12 \n"
"vmlal.s8 q13, d4, d13 \n"
"vext.s8 q3, q3, q3, #1 \n"// d6=r11
"vmlal.s8 q14, d4, d14 \n"
"vmlal.s8 q15, d4, d15 \n"
"vext.s8 q2, q2, q2, #1 \n"// d4=r21
"vmlal.s8 q8, d6, d8 \n"
"vmlal.s8 q9, d6, d9 \n"
"vdup.s8 d12, d2[4] \n"
"vdup.s8 d13, d2[5] \n"
"vmlal.s8 q10, d6, d10 \n"
"vmlal.s8 q11, d6, d11 \n"
"vdup.s8 d14, d2[6] \n"
"vdup.s8 d15, d2[7] \n"
"vmlal.s8 q12, d4, d8 \n"
"vmlal.s8 q13, d4, d9 \n"
"vext.s8 q3, q3, q3, #1 \n"// d6=r12
"vmlal.s8 q14, d4, d10 \n"
"vmlal.s8 q15, d4, d11 \n"
"vext.s8 q2, q2, q2, #1 \n"// d4=r22
"vmlal.s8 q8, d6, d12 \n"
"vmlal.s8 q9, d6, d13 \n"
"vdup.s8 d8, d3[0] \n"
"vdup.s8 d9, d3[1] \n"
"vmlal.s8 q10, d6, d14 \n"
"vmlal.s8 q11, d6, d15 \n"
"vdup.s8 d10, d3[2] \n"
"vdup.s8 d11, d3[3] \n"
"vmlal.s8 q12, d4, d12 \n"
"vmlal.s8 q13, d4, d13 \n"
"pld [%8, #128] \n"
"vld1.s8 {d6-d7}, [%8] \n"// d6=r30 d6=r30n
"add %8, #8 \n"
"vmlal.s8 q14, d4, d14 \n"
"vmlal.s8 q15, d4, d15 \n"
///
"vext.s8 q2, q2, q2, #14 \n"// d4=r20
"vmlal.s8 q8, d4, d8 \n"
"vmlal.s8 q9, d4, d9 \n"
"vdup.s8 d12, d3[4] \n"
"vdup.s8 d13, d3[5] \n"
"vmlal.s8 q10, d4, d10 \n"
"vmlal.s8 q11, d4, d11 \n"
"vdup.s8 d14, d3[6] \n"
"vdup.s8 d15, d3[7] \n"
"vmlal.s8 q12, d6, d8 \n"
"vmlal.s8 q13, d6, d9 \n"
"vext.s8 q2, q2, q2, #1 \n"// d4=r21
"vmlal.s8 q14, d6, d10 \n"
"vmlal.s8 q15, d6, d11 \n"
"vext.s8 q3, q3, q3, #1 \n"// d6=r31
// "pld [%9, #128] \n"
"vld1.s8 {d0}, [%9] \n"
"add %9, #4 \n"
"vmlal.s8 q8, d4, d12 \n"
"vmlal.s8 q9, d4, d13 \n"
"vdup.s8 d8, d0[0] \n"
"vdup.s8 d9, d0[1] \n"
"vmlal.s8 q10, d4, d14 \n"
"vmlal.s8 q11, d4, d15 \n"
"vdup.s8 d10, d0[2] \n"
"vdup.s8 d11, d0[3] \n"
"vmlal.s8 q12, d6, d12 \n"
"vmlal.s8 q13, d6, d13 \n"
"vext.s8 q2, q2, q2, #1 \n"// d4=r22
"vmlal.s8 q14, d6, d14 \n"
"vmlal.s8 q15, d6, d15 \n"
"vext.s8 q3, q3, q3, #1 \n"// d6=r32
"vmlal.s8 q8, d4, d8 \n"
"vmlal.s8 q9, d4, d9 \n"
"pld [%1, #256] \n"
"vld1.s32 {d12-d15}, [%1] \n"
"vmlal.s8 q10, d4, d10 \n"
"vmlal.s8 q11, d4, d11 \n"
"pld [%2, #256] \n"
"vld1.s32 {d0-d3}, [%2] \n"
"vaddw.s16 q6, q6, d16 \n"
"vaddw.s16 q7, q7, d17 \n"
"vaddw.s16 q0, q0, d18 \n"
"vaddw.s16 q1, q1, d19 \n"
"pld [%3, #256] \n"
"vld1.s32 {d16-d19}, [%3] \n"
"vmlal.s8 q12, d6, d8 \n"
"vmlal.s8 q13, d6, d9 \n"
"vst1.s32 {d12-d15}, [%1] \n"
"add %1, %1, %20, lsl #2 \n"
"vmlal.s8 q14, d6, d10 \n"
"vmlal.s8 q15, d6, d11 \n"
"pld [%4, #256] \n"
"vld1.s32 {d4-d7}, [%4] \n"
"vst1.s32 {d0-d3}, [%2] \n"
"add %2, %2, %20, lsl #2 \n"
"vaddw.s16 q8, q8, d20 \n"
"vaddw.s16 q9, q9, d21 \n"
"pld [%1, #256] \n"
"vld1.s32 {d12-d15}, [%1] \n"
"vaddw.s16 q2, q2, d22 \n"
"vaddw.s16 q3, q3, d23 \n"
///
"pld [%2, #256] \n"
"vld1.s32 {d0-d3}, [%2] \n"
"vaddw.s16 q6, q6, d24 \n"
"vst1.s32 {d16-d19}, [%3] \n"
"add %3, %3, %20, lsl #2 \n"
"vaddw.s16 q7, q7, d25 \n"
"pld [%3, #256] \n"
"vld1.s32 {d8-d11}, [%3] \n"
"vaddw.s16 q0, q0, d26 \n"
"vst1.s32 {d4-d7}, [%4] \n"
"add %4, %4, %20, lsl #2 \n"
///
"vaddw.s16 q1, q1, d27 \n"
"pld [%4, #256] \n"
"vld1.s32 {d4-d7}, [%4] \n"
"vaddw.s16 q4, q4, d28 \n"
"vst1.s32 {d12-d15}, [%1]! \n"
"vaddw.s16 q5, q5, d29 \n"
"vst1.s32 {d0-d3}, [%2]! \n"
"vaddw.s16 q2, q2, d30 \n"
"vst1.s32 {d8-d11}, [%3]! \n"
"vaddw.s16 q3, q3, d31 \n"
"sub %9, #36 \n"
"subs %0, #1 \n"
"sub %1, %1, %20, lsl #2 \n"
"sub %2, %2, %20, lsl #2 \n"
"sub %3, %3, %20, lsl #2 \n"
"vst1.s32 {d4-d7}, [%4]! \n"
"sub %4, %4, %20, lsl #2 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(r0), // %5
"=r"(r1), // %6
"=r"(r2), // %7
"=r"(r3), // %8
"=r"(ktmp) // %9
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(r0),
"6"(r1),
"7"(r2),
"8"(r3),
"9"(ktmp),
"r"(outw) // %20
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif
#if __ARM_NEON
if (remain >= 4)
{
remain -= 4;
asm volatile(
"vld1.s8 {d0}, [%4] \n"// d0 = r00 r00n
"add %4, #4 \n"
"vld1.s8 {d2}, [%5] \n"// d2 = r10 r10n
"add %5, #4 \n"
"vld1.s8 {d1}, [%6] \n"// d1 = r20 r20n
"add %6, #4 \n"
"vld1.s8 {d3}, [%7] \n"// d3 = r30 r30n
"add %7, #4 \n"
"vext.s8 d4, d0, d0, #1 \n"// d4 = r01
"vext.s8 d6, d2, d2, #1 \n"// d6 = r11
"vext.s8 d5, d1, d1, #1 \n"// d5 = r21
"vext.s8 d7, d3, d3, #1 \n"// d7 = r31
"vext.s8 d8, d0, d0, #2 \n"// d8 = r02
"vext.s8 d10, d2, d2, #2 \n"// d10 = r12
"vext.s8 d9, d1, d1, #2 \n"// d9 = r22
"vext.s8 d11, d3, d3, #2 \n"// d11 = r32
"vld1.s8 {d12-d15}, [%8]! \n"// d12=k00 k01 d13=k02 k10 d14=k11 k12 d15=k20 k21
"vsli.64 q0, q1, #32 \n"// d0 = r00 r10 d1 = r20 r30
/// r00 r20 r10
"vdup.s8 d24, d12[0] \n"
"vdup.s8 d25, d12[1] \n"
"vdup.s8 d26, d12[2] \n"
"vdup.s8 d27, d12[3] \n"
"vmull.s8 q8, d0, d24 \n"
"vmull.s8 q9, d0, d25 \n"
"vdup.s8 d28, d15[0] \n"
"vdup.s8 d29, d15[1] \n"
"vmull.s8 q10, d0, d26 \n"
"vmull.s8 q11, d0, d27 \n"
"vdup.s8 d30, d15[2] \n"
"vdup.s8 d31, d15[3] \n"
"vmlal.s8 q8, d1, d28 \n"
"vmlal.s8 q9, d1, d29 \n"
"vext.s8 d0, d0, d1, #4 \n"// d0 = r10 r20
"vdup.s8 d24, d13[4] \n"
"vdup.s8 d25, d13[5] \n"
"vmlal.s8 q10, d1, d30 \n"
"vmlal.s8 q11, d1, d31 \n"
"vdup.s8 d26, d13[6] \n"
"vdup.s8 d27, d13[7] \n"
"vmlal.s8 q8, d0, d24 \n"
"vmlal.s8 q9, d0, d25 \n"
/// r01 r21 r11
"vsli.64 q2, q3, #32 \n"// d4 = r01 r11 d5 = r21 r31
"vdup.s8 d28, d12[4] \n"
"vdup.s8 d29, d12[5] \n"
"vmlal.s8 q10, d0, d26 \n"
"vmlal.s8 q11, d0, d27 \n"
"vdup.s8 d30, d12[6] \n"
"vdup.s8 d31, d12[7] \n"
"vmlal.s8 q8, d4, d28 \n"
"vmlal.s8 q9, d4, d29 \n"
"vdup.s8 d24, d15[4] \n"
"vdup.s8 d25, d15[5] \n"
"vmlal.s8 q10, d4, d30 \n"
"vmlal.s8 q11, d4, d31 \n"
"vdup.s8 d26, d15[6] \n"
"vdup.s8 d27, d15[7] \n"
"vmlal.s8 q8, d5, d24 \n"
"vmlal.s8 q9, d5, d25 \n"
"vext.s8 d4, d4, d5, #4 \n"// d4 = r11 r21
"vdup.s8 d28, d14[0] \n"
"vdup.s8 d29, d14[1] \n"
"vmlal.s8 q10, d5, d26 \n"
"vmlal.s8 q11, d5, d27 \n"
"vdup.s8 d30, d14[2] \n"
"vdup.s8 d31, d14[3] \n"
"vmlal.s8 q8, d4, d28 \n"
"vmlal.s8 q9, d4, d29 \n"
/// r02 r22 r12
"vsli.64 q4, q5, #32 \n"// d8 = r02 r12 d9 = r22 r32
"vld1.s8 {d12}, [%8] \n"// d12=k22
"add %8, #4 \n"
"vdup.s8 d24, d13[0] \n"
"vdup.s8 d25, d13[1] \n"
"vmlal.s8 q10, d4, d30 \n"
"vmlal.s8 q11, d4, d31 \n"
"vdup.s8 d26, d13[2] \n"
"vdup.s8 d27, d13[3] \n"
"vmlal.s8 q8, d8, d24 \n"
"vmlal.s8 q9, d8, d25 \n"
"vdup.s8 d28, d12[0] \n"
"vdup.s8 d29, d12[1] \n"
"vmlal.s8 q10, d8, d26 \n"
"vmlal.s8 q11, d8, d27 \n"
"vdup.s8 d30, d12[2] \n"
"vdup.s8 d31, d12[3] \n"
"vmlal.s8 q8, d9, d28 \n"
"vmlal.s8 q9, d9, d29 \n"
"vext.s8 d8, d8, d9, #4 \n"// d8 = r12 r22
"vdup.s8 d24, d14[4] \n"
"vdup.s8 d25, d14[5] \n"
"vld1.s32 {d0-d1}, [%0] \n"
"vmlal.s8 q10, d9, d30 \n"
"vmlal.s8 q11, d9, d31 \n"
"vdup.s8 d26, d14[6] \n"
"vdup.s8 d27, d14[7] \n"
"vld1.s32 {d2-d3}, [%1] \n"
"vmlal.s8 q8, d8, d24 \n"
"vmlal.s8 q9, d8, d25 \n"
"vld1.s32 {d4-d5}, [%2] \n"
"vmlal.s8 q10, d8, d26 \n"
"vmlal.s8 q11, d8, d27 \n"
"vld1.s32 {d6-d7}, [%3] \n"
"vaddw.s16 q0, q0, d16 \n"
"vaddw.s16 q1, q1, d18 \n"
"vst1.s32 {d0-d1}, [%0] \n"
"add %0, %0, %18, lsl #2 \n"
"vaddw.s16 q2, q2, d20 \n"
"vst1.s32 {d2-d3}, [%1] \n"
"add %1, %1, %18, lsl #2 \n"
"vaddw.s16 q3, q3, d22 \n"
"vld1.s32 {d8-d9}, [%0] \n"
"vld1.s32 {d10-d11}, [%1] \n"
"vst1.s32 {d4-d5}, [%2] \n"
"add %2, %2, %18, lsl #2 \n"
"vst1.s32 {d6-d7}, [%3] \n"
"add %3, %3, %18, lsl #2 \n"
"vld1.s32 {d28-d29}, [%2] \n"
"vld1.s32 {d30-d31}, [%3] \n"
"vaddw.s16 q4, q4, d17 \n"
"vaddw.s16 q5, q5, d19 \n"
"sub %8, #36 \n"
"vst1.s32 {d8-d9}, [%0]! \n"
"vaddw.s16 q14, q14, d21 \n"
"vaddw.s16 q15, q15, d23 \n"
"vst1.s32 {d10-d11}, [%1]! \n"
"sub %0, %0, %18, lsl #2 \n"
"sub %1, %1, %18, lsl #2 \n"
"vst1.s32 {d28-d29}, [%2]! \n"
"vst1.s32 {d30-d31}, [%3]! \n"
"sub %2, %2, %18, lsl #2 \n"
"sub %3, %3, %18, lsl #2 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(r0), // %4
"=r"(r1), // %5
"=r"(r2), // %6
"=r"(r3), // %7
"=r"(ktmp) // %8
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(r0),
"5"(r1),
"6"(r2),
"7"(r3),
"8"(ktmp),
"r"(outw) // %18
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif
for (; remain>0; remain--)
{
#if __ARM_NEON
asm volatile(
"vld1.s8 {d0[]}, [%4]! \n"// d0 = 00 00
"vld1.s8 {d1[]}, [%4]! \n"// d1 = 01 01
"vld1.s8 {d2[]}, [%4] \n"// d2 = 02 02
"sub %4, %4, #2 \n"
"vld1.s8 {d3[]}, [%5]! \n"// d3 = 10 10
"vld1.s8 {d4[]}, [%5]! \n"// d4 = 11 11
"vld1.s8 {d5[]}, [%5] \n"// d5 = 12 12
"sub %5, %5, #2 \n"
"vld1.s8 {d6[]}, [%6]! \n"// d6 = 20 20
"vld1.s8 {d7[]}, [%6]! \n"// d7 = 21 21
"vld1.s8 {d8[]}, [%6] \n"// d8 = 22 22
"sub %6, %6, #2 \n"
"vld1.s8 {d9[]}, [%7]! \n"// d9 = 30 30
"vld1.s8 {d10[]}, [%7]! \n"// d10 = 31 31
"vld1.s8 {d11[]}, [%7] \n"// d11 = 32 32
"sub %7, %7, #2 \n"
"vld1.s8 {d12-d15}, [%8]! \n"// d12 d13 d14 d15 = 0~7
"vsli.64 d0, d1, #32 \n"// d0 = 00 01
"vsli.64 d3, d4, #32 \n"// d3 = 10 11
"vmull.s8 q8, d0, d12 \n"
"vsli.64 d2, d3, #32 \n"// d2 = 02 10
"vmull.s8 q9, d3, d12 \n"
"vsli.64 d5, d6, #32 \n"// d5 = 12 20
"vmlal.s8 q8, d2, d13 \n"
"vsli.64 d4, d5, #32 \n"// d4 = 11 12
"vmlal.s8 q9, d5, d13 \n"
"vsli.64 d7, d8, #32 \n"// d7 = 21 22
"vmlal.s8 q8, d4, d14 \n"
"vsli.64 d6, d7, #32 \n"// d6 = 20 21
"vmlal.s8 q9, d7, d14 \n"
"vsli.64 d9, d10, #32 \n"// d9 = 30 31
"vld1.s32 {d20[0]}, [%0] \n"
"vld1.s32 {d20[1]}, [%1] \n"
"add %0, %0, %18, lsl #2 \n"
"add %1, %1, %18, lsl #2 \n"
"vmlal.s8 q8, d6, d15 \n"
"vsli.64 d8, d11, #32 \n"// d8 = 22 32
"vmlal.s8 q9, d9, d15 \n"
"vld1.s8 {d14}, [%8] \n"
"add %8, #4 \n"
"vld1.s32 {d21[0]}, [%2] \n"
"vld1.s32 {d21[1]}, [%3] \n"
"add %2, %2, %18, lsl #2 \n"
"add %3, %3, %18, lsl #2 \n"
"vadd.s16 d12, d16, d17 \n"
"vadd.s16 d13, d18, d19 \n"// q6 = sum0123 sum0123n
"vsli.64 d14, d14, #32 \n"// d14 = 0~3 0~3
"vld1.s32 {d22[0]}, [%0] \n"
"vld1.s32 {d22[1]}, [%1] \n"
"vmlal.s8 q6, d8, d14 \n"
"sub %8, #36 \n"
///
"vld1.s32 {d23[0]}, [%2] \n"
"vld1.s32 {d23[1]}, [%3] \n"
"sub %0, %0, %18, lsl #2 \n"
"sub %1, %1, %18, lsl #2 \n"
// addw
"vaddw.s16 q10, q10, d12 \n"
"vaddw.s16 q11, q11, d13 \n"
"sub %2, %2, %18, lsl #2 \n"
"sub %3, %3, %18, lsl #2 \n"
"vst1.s32 {d20[0]}, [%0] \n"
"vst1.s32 {d20[1]}, [%1] \n"
"add %0, %0, %18, lsl #2 \n"
"add %1, %1, %18, lsl #2 \n"
"vst1.s32 {d21[0]}, [%2] \n"
"vst1.s32 {d21[1]}, [%3] \n"
"add %2, %2, %18, lsl #2 \n"
"add %3, %3, %18, lsl #2 \n"
"vst1.s32 {d22[0]}, [%0]! \n"
"vst1.s32 {d22[1]}, [%1]! \n"
"sub %0, %0, %18, lsl #2 \n"
"sub %1, %1, %18, lsl #2 \n"
"vst1.s32 {d23[0]}, [%2]! \n"
"vst1.s32 {d23[1]}, [%3]! \n"
"sub %2, %2, %18, lsl #2 \n"
"sub %3, %3, %18, lsl #2 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(r0), // %4
"=r"(r1), // %5
"=r"(r2), // %6
"=r"(r3), // %7
"=r"(ktmp) // %8
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(r0),
"5"(r1),
"6"(r2),
"7"(r3),
"8"(ktmp),
"r"(outw) // %18
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
int sum0n = 0;
int sum1n = 0;
int sum2n = 0;
int sum3n = 0;
sum0 += r0[0] * ktmp[0];
sum1 += r0[0] * ktmp[1];
sum2 += r0[0] * ktmp[2];
sum3 += r0[0] * ktmp[3];
sum0 += r0[1] * ktmp[4];
sum1 += r0[1] * ktmp[5];
sum2 += r0[1] * ktmp[6];
sum3 += r0[1] * ktmp[7];
sum0n += r1[0] * ktmp[0];
sum1n += r1[0] * ktmp[1];
sum2n += r1[0] * ktmp[2];
sum3n += r1[0] * ktmp[3];
sum0n += r1[1] * ktmp[4];
sum1n += r1[1] * ktmp[5];
sum2n += r1[1] * ktmp[6];
sum3n += r1[1] * ktmp[7];
ktmp += 8;
sum0 += r0[2] * ktmp[0];
sum1 += r0[2] * ktmp[1];
sum2 += r0[2] * ktmp[2];
sum3 += r0[2] * ktmp[3];
sum0 += r1[0] * ktmp[4];
sum1 += r1[0] * ktmp[5];
sum2 += r1[0] * ktmp[6];
sum3 += r1[0] * ktmp[7];
sum0n += r1[2] * ktmp[0];
sum1n += r1[2] * ktmp[1];
sum2n += r1[2] * ktmp[2];
sum3n += r1[2] * ktmp[3];
sum0n += r2[0] * ktmp[4];
sum1n += r2[0] * ktmp[5];
sum2n += r2[0] * ktmp[6];
sum3n += r2[0] * ktmp[7];
ktmp += 8;
sum0 += r1[1] * ktmp[0];
sum1 += r1[1] * ktmp[1];
sum2 += r1[1] * ktmp[2];
sum3 += r1[1] * ktmp[3];
sum0 += r1[2] * ktmp[4];
sum1 += r1[2] * ktmp[5];
sum2 += r1[2] * ktmp[6];
sum3 += r1[2] * ktmp[7];
sum0n += r2[1] * ktmp[0];
sum1n += r2[1] * ktmp[1];
sum2n += r2[1] * ktmp[2];
sum3n += r2[1] * ktmp[3];
sum0n += r2[2] * ktmp[4];
sum1n += r2[2] * ktmp[5];
sum2n += r2[2] * ktmp[6];
sum3n += r2[2] * ktmp[7];
ktmp += 8;
///
sum0 += r2[0] * ktmp[0];
sum1 += r2[0] * ktmp[1];
sum2 += r2[0] * ktmp[2];
sum3 += r2[0] * ktmp[3];
sum0 += r2[1] * ktmp[4];
sum1 += r2[1] * ktmp[5];
sum2 += r2[1] * ktmp[6];
sum3 += r2[1] * ktmp[7];
sum0n += r3[0] * ktmp[0];
sum1n += r3[0] * ktmp[1];
sum2n += r3[0] * ktmp[2];
sum3n += r3[0] * ktmp[3];
sum0n += r3[1] * ktmp[4];
sum1n += r3[1] * ktmp[5];
sum2n += r3[1] * ktmp[6];
sum3n += r3[1] * ktmp[7];
ktmp += 8;
sum0 += r2[2] * ktmp[0];
sum1 += r2[2] * ktmp[1];
sum2 += r2[2] * ktmp[2];
sum3 += r2[2] * ktmp[3];
sum0n += r3[2] * ktmp[0];
sum1n += r3[2] * ktmp[1];
sum2n += r3[2] * ktmp[2];
sum3n += r3[2] * ktmp[3];
ktmp += 4;
*outptr0 += sum0;
*outptr1 += sum1;
*outptr2 += sum2;
*outptr3 += sum3;
*(outptr0 + outw) += sum0n;
*(outptr1 + outw) += sum1n;
*(outptr2 + outw) += sum2n;
*(outptr3 + outw) += sum3n;
ktmp -= 12*3;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
#endif
r0++;
r1++;
r2++;
r3++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr0 += outw;
outptr1 += outw;
outptr2 += outw;
outptr3 += outw;
}
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
if (nn > 0)
{
asm volatile(
"0: \n"
// "pld [%8, #256] \n"
"vld1.s8 {d0-d3}, [%8]! \n"// d0=k00 k01 d1=k02 k10 d2=k11 k12 d3=k20 k21
"pld [%5, #128] \n"
"vld1.s8 {d4-d5}, [%5] \n"// d4=r00 d5=r00n
"add %5, #8 \n"
"vdup.s8 d8, d0[0] \n"
"vdup.s8 d9, d0[1] \n"
"vdup.s8 d10, d0[2] \n"
"vdup.s8 d11, d0[3] \n"
"vmull.s8 q8, d4, d8 \n"
"vmull.s8 q9, d4, d9 \n"
"vext.s8 d24, d4, d5, #1 \n"// d24=r01
"vdup.s8 d12, d0[4] \n"
"vdup.s8 d13, d0[5] \n"
"vmull.s8 q10, d4, d10 \n"
"vmull.s8 q11, d4, d11 \n"
"vdup.s8 d14, d0[6] \n"
"vdup.s8 d15, d0[7] \n"
"vmlal.s8 q8, d24, d12 \n"
"vmlal.s8 q9, d24, d13 \n"
"vext.s8 d25, d4, d5, #2 \n"// d25=r02
"vdup.s8 d8, d1[0] \n"
"vdup.s8 d9, d1[1] \n"
"vmlal.s8 q10, d24, d14 \n"
"vmlal.s8 q11, d24, d15 \n"
"vdup.s8 d10, d1[2] \n"
"vdup.s8 d11, d1[3] \n"
"vmlal.s8 q8, d25, d8 \n"
"vmlal.s8 q9, d25, d9 \n"
"pld [%6, #128] \n"
"vld1.s8 {d6-d7}, [%6] \n"// d6=r10 d7=r10n
"add %6, #8 \n"
"vdup.s8 d12, d1[4] \n"
"vdup.s8 d13, d1[5] \n"
"vmlal.s8 q10, d25, d10 \n"
"vmlal.s8 q11, d25, d11 \n"
"vdup.s8 d14, d1[6] \n"
"vdup.s8 d15, d1[7] \n"
"vmlal.s8 q8, d6, d12 \n"
"vmlal.s8 q9, d6, d13 \n"
"vext.s8 d26, d6, d7, #1 \n"// d26=r11
"vdup.s8 d8, d2[0] \n"
"vdup.s8 d9, d2[1] \n"
"vmlal.s8 q10, d6, d14 \n"
"vmlal.s8 q11, d6, d15 \n"
"vdup.s8 d10, d2[2] \n"
"vdup.s8 d11, d2[3] \n"
"vmlal.s8 q8, d26, d8 \n"
"vmlal.s8 q9, d26, d9 \n"
"vext.s8 d27, d6, d7, #2 \n"// d27=r12
"vdup.s8 d12, d2[4] \n"
"vdup.s8 d13, d2[5] \n"
"vmlal.s8 q10, d26, d10 \n"
"vmlal.s8 q11, d26, d11 \n"
"vdup.s8 d14, d2[6] \n"
"vdup.s8 d15, d2[7] \n"
"vmlal.s8 q8, d27, d12 \n"
"vmlal.s8 q9, d27, d13 \n"
"pld [%7, #128] \n"
"vld1.s8 {d4-d5}, [%7] \n"// d4=r20 d5=r20n
"add %7, #8 \n"
"vdup.s8 d8, d3[0] \n"
"vdup.s8 d9, d3[1] \n"
"vmlal.s8 q10, d27, d14 \n"
"vmlal.s8 q11, d27, d15 \n"
"vdup.s8 d10, d3[2] \n"
"vdup.s8 d11, d3[3] \n"
"vmlal.s8 q8, d4, d8 \n"
"vmlal.s8 q9, d4, d9 \n"
"vext.s8 d24, d4, d5, #1 \n"// d24=r21
"vdup.s8 d12, d3[4] \n"
"vdup.s8 d13, d3[5] \n"
"vmlal.s8 q10, d4, d10 \n"
"vmlal.s8 q11, d4, d11 \n"
"vdup.s8 d14, d3[6] \n"
"vdup.s8 d15, d3[7] \n"
"vmlal.s8 q8, d24, d12 \n"
"vmlal.s8 q9, d24, d13 \n"
// "pld [%8, #128] \n"
"vld1.s8 {d0}, [%8] \n"
"add %8, #4 \n"
"vext.s8 d25, d4, d5, #2 \n"// d25=r22
"vdup.s8 d8, d0[0] \n"
"vdup.s8 d9, d0[1] \n"
"vmlal.s8 q10, d24, d14 \n"
"vmlal.s8 q11, d24, d15 \n"
"vdup.s8 d10, d0[2] \n"
"vdup.s8 d11, d0[3] \n"
"pld [%1, #256] \n"
"vld1.s32 {d12-d15}, [%1] \n"
"vmlal.s8 q8, d25, d8 \n"
"vmlal.s8 q9, d25, d9 \n"
"pld [%2, #256] \n"
"vld1.s32 {d0-d3}, [%2] \n"
"vaddw.s16 q6, q6, d16 \n"
"vaddw.s16 q7, q7, d17 \n"
"vmlal.s8 q10, d25, d10 \n"
"vmlal.s8 q11, d25, d11 \n"
"vaddw.s16 q0, q0, d18 \n"
"vaddw.s16 q1, q1, d19 \n"
"pld [%3, #256] \n"
"vld1.s32 {d16-d19}, [%3] \n"
"vst1.s32 {d12-d15}, [%1]! \n"
"pld [%4, #256] \n"
"vld1.s32 {d4-d7}, [%4] \n"
"vst1.s32 {d0-d3}, [%2]! \n"
"vaddw.s16 q8, q8, d20 \n"
"vaddw.s16 q9, q9, d21 \n"
"vaddw.s16 q2, q2, d22 \n"
"vaddw.s16 q3, q3, d23 \n"
"sub %8, #36 \n"
"vst1.s32 {d16-d19}, [%3]! \n"
"subs %0, #1 \n"
"vst1.s32 {d4-d7}, [%4]! \n"
"bne 0b \n"
: "=r"(nn),
"=r"(outptr0),
"=r"(outptr1),
"=r"(outptr2),
"=r"(outptr3),
"=r"(r0),
"=r"(r1),
"=r"(r2),
"=r"(ktmp)
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(r0),
"6"(r1),
"7"(r2),
"8"(ktmp)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13"
);
}
#endif
#if __ARM_NEON
if (remain >= 4)
{
remain -= 4;
asm volatile(
"vld1.s8 {d0}, [%4] \n"// d0 = r00 r00n
"add %4, #4 \n"
"vld1.s8 {d3}, [%5] \n"// d3 = r10 r10n
"add %5, #4 \n"
"vld1.s8 {d6}, [%6] \n"// d6 = r20 r20n
"add %6, #4 \n"
"vld1.s8 {d12-d15}, [%7]! \n"// d12=k00 k01 d13=k02 k10 d14=k11 k12 d15=k20 k21
/// r00 r01 r02
"vext.s8 d1, d0, d0, #1 \n"
"vext.s8 d2, d0, d0, #2 \n"
"vdup.s8 d24, d12[0] \n"
"vdup.s8 d26, d12[1] \n"
"vdup.s8 d25, d12[2] \n"
"vdup.s8 d27, d12[3] \n"
"vsli.64 d0, d0, #32 \n"// d0 = r00 r00
"vsli.64 d1, d1, #32 \n"// d1 = r01 r01
"vsli.64 d2, d2, #32 \n"// d2 = r02 r02
"vsli.64 q12, q13, #32 \n"
"vdup.s8 d28, d12[4] \n"
"vdup.s8 d30, d12[5] \n"
"vdup.s8 d29, d12[6] \n"
"vdup.s8 d31, d12[7] \n"
"vsli.64 q14, q15, #32 \n"
"vmull.s8 q8, d0, d24 \n"
"vmull.s8 q9, d0, d25 \n"
"vdup.s8 d24, d13[0] \n"
"vdup.s8 d26, d13[1] \n"
"vdup.s8 d25, d13[2] \n"
"vdup.s8 d27, d13[3] \n"
"vsli.64 q12, q13, #32 \n"
"vmlal.s8 q8, d1, d28 \n"
"vmlal.s8 q9, d1, d29 \n"
/// r10 r11 r12
"vext.s8 d4, d3, d3, #1 \n"
"vext.s8 d5, d3, d3, #2 \n"
"vdup.s8 d28, d13[4] \n"
"vdup.s8 d30, d13[5] \n"
"vdup.s8 d29, d13[6] \n"
"vdup.s8 d31, d13[7] \n"
"vsli.64 d3, d3, #32 \n"// d3 = r10 r10
"vsli.64 d4, d4, #32 \n"// d4 = r11 r11
"vsli.64 d5, d5, #32 \n"// d5 = r12 r12
"vsli.64 q14, q15, #32 \n"
"vmlal.s8 q8, d2, d24 \n"
"vmlal.s8 q9, d2, d25 \n"
"vdup.s8 d24, d14[0] \n"
"vdup.s8 d26, d14[1] \n"
"vdup.s8 d25, d14[2] \n"
"vdup.s8 d27, d14[3] \n"
"vsli.64 q12, q13, #32 \n"
"vmlal.s8 q8, d3, d28 \n"
"vmlal.s8 q9, d3, d29 \n"
"vdup.s8 d28, d14[4] \n"
"vdup.s8 d30, d14[5] \n"
"vdup.s8 d29, d14[6] \n"
"vdup.s8 d31, d14[7] \n"
"vsli.64 q14, q15, #32 \n"
"vmlal.s8 q8, d4, d24 \n"
"vmlal.s8 q9, d4, d25 \n"
/// r20 r21 r22
"vext.s8 d7, d6, d6, #1 \n"
"vext.s8 d8, d6, d6, #2 \n"
"vdup.s8 d24, d15[0] \n"
"vdup.s8 d26, d15[1] \n"
"vdup.s8 d25, d15[2] \n"
"vdup.s8 d27, d15[3] \n"
"vsli.64 d6, d6, #32 \n"// d6 = r20 r20
"vsli.64 d7, d7, #32 \n"// d7 = r21 r21
"vsli.64 d8, d8, #32 \n"// d8 = r22 r22
"vsli.64 q12, q13, #32 \n"
"vmlal.s8 q8, d5, d28 \n"
"vmlal.s8 q9, d5, d29 \n"
"vdup.s8 d28, d15[4] \n"
"vdup.s8 d30, d15[5] \n"
"vdup.s8 d29, d15[6] \n"
"vdup.s8 d31, d15[7] \n"
"vsli.64 q14, q15, #32 \n"
"vld1.s8 {d12}, [%7] \n"// d12=k22
"add %7, #4 \n"
"vmlal.s8 q8, d6, d24 \n"
"vmlal.s8 q9, d6, d25 \n"
"vdup.s8 d24, d12[0] \n"
"vdup.s8 d26, d12[1] \n"
"vdup.s8 d25, d12[2] \n"
"vdup.s8 d27, d12[3] \n"
"vsli.64 q12, q13, #32 \n"
"vmlal.s8 q8, d7, d28 \n"
"vmlal.s8 q9, d7, d29 \n"
"vld1.s32 {d0-d1}, [%0] \n"
"vld1.s32 {d2-d3}, [%1] \n"
"vmlal.s8 q8, d8, d24 \n"
"vmlal.s8 q9, d8, d25 \n"
///
"vld1.s32 {d4-d5}, [%2] \n"
"vld1.s32 {d6-d7}, [%3] \n"
"vaddw.s16 q0, q0, d16 \n"
"vaddw.s16 q1, q1, d17 \n"
"vaddw.s16 q2, q2, d18 \n"
"vaddw.s16 q3, q3, d19 \n"
"vst1.s32 {d0-d1}, [%0]! \n"
"vst1.s32 {d2-d3}, [%1]! \n"
"sub %7, #36 \n"
"vst1.s32 {d4-d5}, [%2]! \n"
"vst1.s32 {d6-d7}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(r0), // %4
"=r"(r1), // %5
"=r"(r2), // %6
"=r"(ktmp) // %7
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(r0),
"5"(r1),
"6"(r2),
"7"(ktmp)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15"
);
}
#endif
for (; remain>0; remain--)
{
#if __ARM_NEON
asm volatile(
"vld1.s8 {d0[]}, [%4]! \n"
"vld1.s8 {d1[]}, [%4]! \n"
"vld1.s8 {d4-d7}, [%7]! \n"// d4 d5 d6 d7 = 0~7
"vsli.64 d0, d1, #32 \n"// d0 = 00 01
"vld1.s8 {d2[]}, [%4] \n"
"sub %4, %4, #2 \n"
"vld1.s8 {d3[]}, [%5]! \n"
"vsli.64 d2, d3, #32 \n"// d2 = 02 10
"vmull.s8 q8, d0, d4 \n"
"vld1.s8 {d0[]}, [%5]! \n"
"vld1.s8 {d1[]}, [%5] \n"
"sub %5, %5, #2 \n"
"vsli.64 d0, d1, #32 \n"// d0 = 11 12
"vmlal.s8 q8, d2, d5 \n"
"vld1.s8 {d2[]}, [%6]! \n"
"vld1.s8 {d3[]}, [%6]! \n"
"vsli.64 d2, d3, #32 \n"// d2 = 20 21
"vmlal.s8 q8, d0, d6 \n"
"vld1.s8 {d0[]}, [%6] \n"
"sub %6, %6, #2 \n"
"veor d1, d1, d1 \n"
"vld1.s8 {d4}, [%7] \n"// d4 = 0~4 xxxx
"sub %7, #32 \n"
"vsli.64 d0, d1, #32 \n"// d0 = 22 zero
"vmlal.s8 q8, d2, d7 \n"
"vld1.s32 {d20[0]}, [%0] \n"
"vmlal.s8 q8, d0, d4 \n"
"vld1.s32 {d20[1]}, [%1] \n"
"vadd.s16 d16, d16, d17 \n"
"vld1.s32 {d21[0]}, [%2] \n"
"vld1.s32 {d21[1]}, [%3] \n"
"vaddw.s16 q10, q10, d16 \n"
"vst1.s32 {d20[0]}, [%0]! \n"
"vst1.s32 {d20[1]}, [%1]! \n"
"vst1.s32 {d21[0]}, [%2]! \n"
"vst1.s32 {d21[1]}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(r0), // %4
"=r"(r1), // %5
"=r"(r2), // %6
"=r"(ktmp) // %7
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(r0),
"5"(r1),
"6"(r2),
"7"(ktmp)
: "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q10"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
sum0 += r0[0] * ktmp[0];
sum1 += r0[0] * ktmp[1];
sum2 += r0[0] * ktmp[2];
sum3 += r0[0] * ktmp[3];
sum0 += r0[1] * ktmp[4];
sum1 += r0[1] * ktmp[5];
sum2 += r0[1] * ktmp[6];
sum3 += r0[1] * ktmp[7];
ktmp += 8;
sum0 += r0[2] * ktmp[0];
sum1 += r0[2] * ktmp[1];
sum2 += r0[2] * ktmp[2];
sum3 += r0[2] * ktmp[3];
sum0 += r1[0] * ktmp[4];
sum1 += r1[0] * ktmp[5];
sum2 += r1[0] * ktmp[6];
sum3 += r1[0] * ktmp[7];
ktmp += 8;
sum0 += r1[1] * ktmp[0];
sum1 += r1[1] * ktmp[1];
sum2 += r1[1] * ktmp[2];
sum3 += r1[1] * ktmp[3];
sum0 += r1[2] * ktmp[4];
sum1 += r1[2] * ktmp[5];
sum2 += r1[2] * ktmp[6];
sum3 += r1[2] * ktmp[7];
ktmp += 8;
sum0 += r2[0] * ktmp[0];
sum1 += r2[0] * ktmp[1];
sum2 += r2[0] * ktmp[2];
sum3 += r2[0] * ktmp[3];
sum0 += r2[1] * ktmp[4];
sum1 += r2[1] * ktmp[5];
sum2 += r2[1] * ktmp[6];
sum3 += r2[1] * ktmp[7];
ktmp += 8;
sum0 += r2[2] * ktmp[0];
sum1 += r2[2] * ktmp[1];
sum2 += r2[2] * ktmp[2];
sum3 += r2[2] * ktmp[3];
ktmp += 8;
*outptr0 += sum0;
*outptr1 += sum1;
*outptr2 += sum2;
*outptr3 += sum3;
ktmp -= 8*5;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
#endif
r0++;
r1++;
r2++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
ktmp += 4*9;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
out0.fill(0);
const signed char* ktmp = _kernel.channel(p/4 + p%4);
for (int q=0; q<inch; q++)
{
int* outptr0 = out0;
int* outptr0n = outptr0 + outw;
const signed char* img0 = bottom_blob.channel(q);
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w * 2;
const signed char* r3 = img0 + w * 3;
int8x8_t _k00 = vdup_n_s8(ktmp[0]);
int8x8_t _k01 = vdup_n_s8(ktmp[1]);
int8x8_t _k02 = vdup_n_s8(ktmp[2]);
int8x8_t _k10 = vdup_n_s8(ktmp[3]);
int8x8_t _k11 = vdup_n_s8(ktmp[4]);
int8x8_t _k12 = vdup_n_s8(ktmp[5]);
int8x8_t _k20 = vdup_n_s8(ktmp[6]);
int8x8_t _k21 = vdup_n_s8(ktmp[7]);
int8x8_t _k22 = vdup_n_s8(ktmp[8]);
int i = 0;
for (; i+1 < outh; i+=2)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%3, #128] \n"
"vld1.s8 {d4-d5}, [%3] \n"// d4=r00 d5=r00n
"add %3, #8 \n"
"pld [%6, #128] \n"
"vld1.s8 {d6-d7}, [%6] \n"// d6=r30 d7=r30n
"add %6, #8 \n"
"vext.s8 d8, d4, d5, #1 \n"// d8=r01
"vext.s8 d10, d6, d7, #1 \n"// d10=r31
"vmull.s8 q8, d4, %P14 \n"
"vmull.s8 q9, d6, %P20 \n"
"vext.s8 d9, d4, d5, #2 \n"// d9=r02
"vext.s8 d11, d6, d7, #2 \n"// d11=r32
"vmlal.s8 q8, d8, %P15 \n"
"vmlal.s8 q9, d10, %P21 \n"
"pld [%4, #128] \n"
"vld1.s8 {d4-d5}, [%4] \n"// d4=r10 d5=r10n
"add %4, #8 \n"
"vmlal.s8 q8, d9, %P16 \n"
"vmlal.s8 q9, d11, %P22 \n"
"vext.s8 d8, d4, d5, #1 \n"// d8=r11
"vmlal.s8 q8, d4, %P17 \n"
"vmlal.s8 q9, d4, %P14 \n"
"vext.s8 d9, d4, d5, #2 \n"// d9=r12
"vmlal.s8 q8, d8, %P18 \n"
"vmlal.s8 q9, d8, %P15 \n"
"pld [%5, #128] \n"
"vld1.s8 {d6-d7}, [%5] \n"// d6=r20 d7=r20n
"add %5, #8 \n"
"vmlal.s8 q8, d9, %P19 \n"
"vmlal.s8 q9, d9, %P16 \n"
"vext.s8 d10, d6, d7, #1 \n"// d10=r21
"vmlal.s8 q8, d6, %P20 \n"
"vmlal.s8 q9, d6, %P17 \n"
"vext.s8 d11, d6, d7, #2 \n"// d11=r22
"vmlal.s8 q8, d10, %P21 \n"
"vmlal.s8 q9, d10, %P18 \n"
"pld [%1, #256] \n"
"vld1.s32 {d0-d3}, [%1] \n"
"vmlal.s8 q8, d11, %P22 \n"
"vmlal.s8 q9, d11, %P19 \n"
"pld [%2, #256] \n"
"vld1.s32 {d12-d15}, [%2] \n"
"vaddw.s16 q0, q0, d16 \n"
"vaddw.s16 q1, q1, d17 \n"
"vaddw.s16 q6, q6, d18 \n"
"vaddw.s16 q7, q7, d19 \n"
"vst1.s32 {d0-d3}, [%1]! \n"
"subs %0, #1 \n"
"vst1.s32 {d12-d15}, [%2]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr0n), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr0n),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"w"(_k00), // %14
"w"(_k01), // %15
"w"(_k02), // %16
"w"(_k10), // %17
"w"(_k11), // %18
"w"(_k12), // %19
"w"(_k20), // %20
"w"(_k21), // %21
"w"(_k22) // %22
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9"
);
}
#endif
#if __ARM_NEON
if (remain >= 4)
{
remain -= 4;
asm volatile(
"vld1.s8 {d12}, [%6]! \n"// d12=0~7
/// r00 r01 r02
"vld1.s8 {d0}, [%2] \n"// d0 = r00 r00n
"add %2, #4 \n"
"vdup.s8 d24, d12[0] \n"
"vext.s8 d1, d0, d0, #1 \n"
"vext.s8 d2, d0, d0, #2 \n"
/// r10 r11 r12
"vld1.s8 {d3}, [%3] \n"// d3 = r10 r10n
"add %3, #4 \n"
"vdup.s8 d25, d12[1] \n"
"vext.s8 d4, d3, d3, #1 \n"
"vext.s8 d5, d3, d3, #2 \n"
"vdup.s8 d26, d12[2] \n"
"vsli.64 d0, d3, #32 \n"// d0 = r00 r10
"vsli.64 d1, d4, #32 \n"// d1 = r01 r11
"vsli.64 d2, d5, #32 \n"// d2 = r02 r12
"vmull.s8 q8, d0, d24 \n"
"vmull.s8 q9, d1, d25 \n"
/// r20 r21 r22
"vld1.s8 {d6}, [%4] \n"// d6 = r20 r20n
"add %4, #4 \n"
"vdup.s8 d27, d12[3] \n"
"vdup.s8 d28, d12[4] \n"
"vext.s8 d7, d6, d6, #1 \n"
"vext.s8 d8, d6, d6, #2 \n"
"vsli.64 d3, d6, #32 \n"// d3 = r10 r20
"vdup.s8 d29, d12[5] \n"
"vmlal.s8 q8, d2, d26 \n"
"vmlal.s8 q9, d3, d27 \n"
"vsli.64 d4, d7, #32 \n"// d4 = r11 r21
"vsli.64 d5, d8, #32 \n"// d5 = r12 r22
/// r30 r31 r32
"vld1.s8 {d9}, [%5] \n"// d9 = r30 r30n
"add %5, #4 \n"
"vdup.s8 d30, d12[6] \n"
"vdup.s8 d31, d12[7] \n"
"vmlal.s8 q8, d4, d28 \n"
"vmlal.s8 q9, d5, d29 \n"
"vext.s8 d10, d9, d9, #1 \n"
"vext.s8 d11, d9, d9, #2 \n"
"vsli.64 d6, d9, #32 \n"// d6 = r20 r30
"vsli.64 d7, d10, #32 \n"// d7 = r21 r31
"vmlal.s8 q8, d6, d30 \n"
"vmlal.s8 q9, d7, d31 \n"
"vld1.s8 {d13[]}, [%6]! \n"
"vsli.64 d8, d11, #32 \n"// d8 = r22 r32
"vmlal.s8 q8, d8, d13 \n"
///
"vld1.s32 {d0-d1}, [%0] \n"
"vadd.s16 q8, q8, q9 \n"
"vld1.s32 {d2-d3}, [%1] \n"
"vaddw.s16 q0, q0, d16 \n"
"vaddw.s16 q1, q1, d17 \n"
"sub %6, #9 \n"
"vst1.s32 {d0-d1}, [%0]! \n"
"vst1.s32 {d2-d3}, [%1]! \n"
: "=r"(outptr0), // %0
"=r"(outptr0n), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(ktmp) // %6
: "0"(outptr0),
"1"(outptr0n),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(ktmp)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif
for (; remain>0; remain--)
{
#if __ARM_NEON
asm volatile(
"vld1.s8 {d0[0]}, [%2]! \n"
"vld1.s8 {d0[1]}, [%2]! \n"
"vld1.s8 {d0[2]}, [%2] \n"
"sub %2, #2 \n"
"vld1.s8 {d0[3]}, [%3]! \n"
"vld1.s8 {d0[4]}, [%3]! \n"
"vld1.s8 {d0[5]}, [%3] \n"
"sub %3, #2 \n"
"vld1.s8 {d0[6]}, [%4]! \n"
"vld1.s8 {d0[7]}, [%4]! \n"// d0=r
"vld1.s8 {d4[]}, [%4] \n"// d4=r22
"sub %4, #2 \n"
"vext.s8 d1, d0, d4, #3 \n"
"vld1.s8 {d1[6]}, [%5]! \n"
"vld1.s8 {d1[7]}, [%5]! \n"// d1=rn
"vld1.s8 {d2}, [%6]! \n"// d2=k01234567
"vld1.s8 {d5[]}, [%5] \n"// d5=r32
"sub %5, #2 \n"
"veor d3, d3 \n"
"vmull.s8 q8, d0, d2 \n"
"vmull.s8 q9, d1, d2 \n"
"vld1.s8 {d3[0]}, [%6] \n"// d3=k8 ... zeros
"sub %6, #8 \n"
"vmlal.s8 q8, d4, d3 \n"
"vmlal.s8 q9, d5, d3 \n"
"vld1.s32 {d6[0]}, [%0] \n"
"vadd.s16 d16, d16, d17 \n"
"vadd.s16 d18, d18, d19 \n"
"vld1.s32 {d6[1]}, [%1] \n"
"vpadd.s16 d16, d16, d18 \n"
"vpadal.s16 d6, d16 \n"
"vst1.s32 {d6[0]}, [%0]! \n"
"vst1.s32 {d6[1]}, [%1]! \n"
: "=r"(outptr0), // %0
"=r"(outptr0n), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(ktmp) // %6
: "0"(outptr0),
"1"(outptr0n),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(ktmp)
: "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9"
);
#else
int sum0 = 0;
int sum0n = 0;
sum0 += r0[0] * ktmp[0];
sum0 += r0[1] * ktmp[1];
sum0 += r0[2] * ktmp[2];
sum0 += r1[0] * ktmp[3];
sum0 += r1[1] * ktmp[4];
sum0 += r1[2] * ktmp[5];
sum0 += r2[0] * ktmp[6];
sum0 += r2[1] * ktmp[7];
sum0 += r2[2] * ktmp[8];
sum0n += r1[0] * ktmp[0];
sum0n += r1[1] * ktmp[1];
sum0n += r1[2] * ktmp[2];
sum0n += r2[0] * ktmp[3];
sum0n += r2[1] * ktmp[4];
sum0n += r2[2] * ktmp[5];
sum0n += r3[0] * ktmp[6];
sum0n += r3[1] * ktmp[7];
sum0n += r3[2] * ktmp[8];
*outptr0 += sum0;
*outptr0n += sum0n;
outptr0++;
outptr0n++;
#endif
r0++;
r1++;
r2++;
r3++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr0 += outw;
outptr0n += outw;
}
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%2, #128] \n"
"vld1.s8 {d4-d5}, [%2] \n"// d4=r00 d5=r00n
"add %2, #8 \n"
"vext.s8 d8, d4, d5, #1 \n"// d8=r01
"vmull.s8 q8, d4, %P10 \n"
"vext.s8 d9, d4, d5, #2 \n"// d9=r02
"vmull.s8 q9, d8, %P11 \n"
"pld [%3, #128] \n"
"vld1.s8 {d6-d7}, [%3] \n"// d6=r10 d7=r10n
"add %3, #8 \n"
"vmlal.s8 q8, d9, %P12 \n"
"vext.s8 d10, d6, d7, #1 \n"// d10=r11
"vmlal.s8 q9, d6, %P13 \n"
"vext.s8 d11, d6, d7, #2 \n"// d11=r12
"vmlal.s8 q8, d10, %P14 \n"
"pld [%4, #128] \n"
"vld1.s8 {d4-d5}, [%4] \n"// d4=r20 d5=r20n
"add %4, #8 \n"
"vmlal.s8 q9, d11, %P15 \n"
"vext.s8 d8, d4, d5, #1 \n"// d8=r21
"vmlal.s8 q8, d4, %P16 \n"
"vext.s8 d9, d4, d5, #2 \n"// d9=r22
"vmlal.s8 q9, d8, %P17 \n"
"vmlal.s8 q8, d9, %P18 \n"
"pld [%1, #256] \n"
"vld1.s32 {d0-d3}, [%1] \n"
"vadd.s16 q8, q8, q9 \n"
"vaddw.s16 q0, q0, d16 \n"
"vaddw.s16 q1, q1, d17 \n"
"subs %0, #1 \n"
"vst1.s32 {d0-d3}, [%1]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q8", "q9"
);
}
#endif
for (; remain>0; remain--)
{
int sum0 = 0;
sum0 += r0[0] * ktmp[0];
sum0 += r0[1] * ktmp[1];
sum0 += r0[2] * ktmp[2];
sum0 += r1[0] * ktmp[3];
sum0 += r1[1] * ktmp[4];
sum0 += r1[2] * ktmp[5];
sum0 += r2[0] * ktmp[6];
sum0 += r2[1] * ktmp[7];
sum0 += r2[2] * ktmp[8];
*outptr0 += sum0;
r0++;
r1++;
r2++;
outptr0++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
ktmp += 9;
}
}
}
// 3x3 stride-2 int8 convolution entry point.
//
// The previous version built an 8-entry function-pointer table indexed by
// (top_blob.w & 7), but every entry pointed at the same kernel
// (conv3x3s2_neon_s8), so the dispatch was dead complexity.  Call the
// kernel directly; behavior for callers is unchanged.
static void conv3x3s2_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
    conv3x3s2_neon_s8(bottom_blob, top_blob, _kernel, opt);
}
#endif
|
trsm_x_sky_u_lo_col.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/*
 * Multi-RHS triangular solve on a skyline (SKY) matrix, column-major
 * dense operands: for each of `columns` right-hand sides, computes
 *     y(:, j) = alpha * x(:, j) - (strictly-lower part of A) * y(:, j)
 * by forward substitution.  The last stored element of each skyline row
 * (the diagonal slot) is deliberately skipped, i.e. a unit diagonal is
 * assumed.  Columns are independent, so they are processed in parallel.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    int thread_count = alpha_get_thread_num();

#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_count)
#endif
    for (ALPHA_INT col = 0; col < columns; col++)
    {
        const ALPHA_Number *x_col = x + col * ldx;
        ALPHA_Number *y_col = y + col * ldy;

        for (ALPHA_INT r = 0; r < A->rows; r++)
        {
            const ALPHA_INT start = A->pointers[r];
            const ALPHA_INT end = A->pointers[r + 1];

            /* accumulate the strictly-lower contributions of row r */
            ALPHA_Number acc;
            alpha_setzero(acc);

            for (ALPHA_INT ai = start; ai + 1 < end; ++ai)
            {
                /* element ai of the skyline row sits in column
                   r - (end - 1 - ai); the entry at end - 1 is the diagonal */
                const ALPHA_INT c = r - (end - 1 - ai);
                alpha_madde(acc, A->values[ai], y_col[c]);
            }

            ALPHA_Number scaled;
            alpha_mul(scaled, alpha, x_col[r]);
            alpha_sub(y_col[r], scaled, acc);
        }
    }

    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
precedence_move_generator.h | /*****************************************************************************/
// Copyright (c) 2020-2021 Yuji KOGUMA
// Released under the MIT license
// https://opensource.org/licenses/mit-license.php
/*****************************************************************************/
#ifndef PRINTEMPS_NEIGHBORHOOD_PRECEDENCE_MOVE_GENERATOR_H__
#define PRINTEMPS_NEIGHBORHOOD_PRECEDENCE_MOVE_GENERATOR_H__
#include "abstract_move_generator.h"
namespace printemps {
namespace neighborhood {
/*****************************************************************************/
template <class T_Variable, class T_Expression>
/**
 * Neighborhood move generator for "precedence"-type moves: for every
 * binomial constraint it creates a pair of moves that shift the two
 * involved variables together (+1/+1 and -1/-1), preserving their
 * difference.
 */
class PrecedenceMoveGenerator
    : public AbstractMoveGenerator<T_Variable, T_Expression> {
   private:
   public:
    /*************************************************************************/
    PrecedenceMoveGenerator(void) {
        /// nothing to do
    }

    /*************************************************************************/
    virtual ~PrecedenceMoveGenerator(void) {
        /// nothing to do
    }

    /*************************************************************************/
    /**
     * Builds the move list (two moves per binomial constraint) and installs
     * the updater lambda that refreshes the target values and availability
     * flags each iteration.
     */
    void setup(const std::vector<model_component::Constraint<
                   T_Variable, T_Expression> *> &a_RAW_CONSTRAINT_PTRS) {
        /**
         * Exclude constraints which contain fixed variables or selection
         * variables.
         */
        auto constraint_ptrs =
            extract_effective_constraint_ptrs(a_RAW_CONSTRAINT_PTRS);

        /**
         * Convert constraint objects to BinomialConstraint objects.
         */
        auto binomials = convert_to_binomial_constraints(constraint_ptrs);

        /**
         * Setup move objects.
         */
        const int BINOMIALS_SIZE = binomials.size();

        // Two moves per binomial: slot 2*i (later updated to +1/+1) and
        // slot 2*i+1 (later updated to -1/-1).
        this->m_moves.resize(2 * BINOMIALS_SIZE);
        this->m_flags.resize(2 * BINOMIALS_SIZE);

        for (auto i = 0; i < BINOMIALS_SIZE; i++) {
            this->m_moves[2 * i].sense = MoveSense::Precedence;
            this->m_moves[2 * i].alterations.emplace_back(
                binomials[i].variable_ptr_first, 0);
            this->m_moves[2 * i].alterations.emplace_back(
                binomials[i].variable_ptr_second, 0);
            this->m_moves[2 * i].is_univariable_move = false;

            // Related constraints are the union over both variables.
            utility::update_union_set(
                &(this->m_moves[2 * i].related_constraint_ptrs),
                binomials[i].variable_ptr_first->related_constraint_ptrs());

            utility::update_union_set(
                &(this->m_moves[2 * i].related_constraint_ptrs),
                binomials[i].variable_ptr_second->related_constraint_ptrs());

            this->m_moves[2 * i].is_special_neighborhood_move = true;
            this->m_moves[2 * i].is_available                 = true;
            this->m_moves[2 * i].overlap_rate                 = 0.0;

            // The odd slot starts as a full copy; only its target values
            // differ, and those are set by the updater below.
            this->m_moves[2 * i + 1] = this->m_moves[2 * i];
        }

        /**
         * Setup move updater.
         */
        // NOTE: `binomials` is captured by value so the lambda stays valid
        // after setup() returns.
        auto move_updater =  //
            [this, binomials, BINOMIALS_SIZE](
                auto *                      a_moves,                      //
                auto *                      a_flags,                      //
                const bool                  a_ACCEPT_ALL,                 //
                const bool                  a_ACCEPT_OBJECTIVE_IMPROVABLE,   //
                const bool                  a_ACCEPT_FEASIBILITY_IMPROVABLE, //
                [[maybe_unused]] const bool a_IS_ENABLED_PARALLEL) {
#ifdef _OPENMP
#pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static)
#endif
                // Refresh target values from the current variable values.
                for (auto i = 0; i < BINOMIALS_SIZE; i++) {
                    {
                        auto  index       = 2 * i;
                        auto &alterations = (*a_moves)[index].alterations;
                        alterations[0].second =
                            binomials[i].variable_ptr_first->value() + 1;
                        alterations[1].second =
                            binomials[i].variable_ptr_second->value() + 1;
                    }

                    {
                        auto  index       = 2 * i + 1;
                        auto &alterations = (*a_moves)[index].alterations;
                        alterations[0].second =
                            binomials[i].variable_ptr_first->value() - 1;
                        alterations[1].second =
                            binomials[i].variable_ptr_second->value() - 1;
                    }
                }

                const int MOVES_SIZE = a_moves->size();

#ifdef _OPENMP
#pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static)
#endif
                // Flag each move as enabled (1) or filtered out (0).
                for (auto i = 0; i < MOVES_SIZE; i++) {
                    (*a_flags)[i] = 1;
                    if (!(*a_moves)[i].is_available) {
                        (*a_flags)[i] = 0;
                        continue;
                    }
                    if (neighborhood::has_fixed_variable((*a_moves)[i])) {
                        (*a_flags)[i] = 0;
                        continue;
                    }
                    if (neighborhood::has_bound_violation((*a_moves)[i])) {
                        (*a_flags)[i] = 0;
                        continue;
                    }
                    if (a_ACCEPT_ALL) {
                        /** nothing to do */
                    } else {
                        // Keep the move only if it can improve the
                        // objective or feasibility (when requested).
                        if (a_ACCEPT_OBJECTIVE_IMPROVABLE &&
                            neighborhood::has_objective_improvable_variable(
                                (*a_moves)[i])) {
                            continue;
                        }

                        if (a_ACCEPT_FEASIBILITY_IMPROVABLE &&
                            neighborhood::has_feasibility_improvable_variable(
                                (*a_moves)[i])) {
                            continue;
                        }

                        (*a_flags)[i] = 0;
                    }
                }
            };
        this->m_move_updater = move_updater;
    }
};
} // namespace neighborhood
} // namespace printemps
#endif
/*****************************************************************************/
// END
/*****************************************************************************/ |
threshold.c | /* Copyright 2014. The Regents of the University of California.
* Copyright 2015-2017. Martin Uecker.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2013-2017 Martin Uecker <martin.uecker@med.uni-goettingen.de>
* 2015-2016 Jon Tamir <jtamir@eecs.berkeley.edu>
* 2015 Frank Ong <frankong@berkeley.edu>
*/
#include <stdbool.h>
#include <complex.h>
#include "num/flpmath.h"
#include "num/multind.h"
#include "num/init.h"
#include "iter/prox.h"
#include "iter/thresh.h"
#include "misc/mmio.h"
#include "misc/misc.h"
#include "misc/debug.h"
#include "misc/opts.h"
#include "lowrank/lrthresh.h"
#include "linops/waveop.h"
#include "dfwavelet/prox_dfwavelet.h"
// FIXME: lowrank interface should not be coupled to mri.h -- it should take D as an input
#ifndef DIMS
#define DIMS 16
#endif
// FIXME: consider moving this to a more accessible location?
/*
 * Soft-threshold the input in the Daubechies wavelet domain:
 * transform, joint soft-threshold with parameter lambda, transform back.
 *
 * D      number of dimensions
 * dims   dimensions of in/out
 * lambda threshold parameter
 * flags  bitmask of dimensions to threshold jointly
 *
 * Renamed the local "course_scale" to "coarse_scale" for correct spelling
 * and consistency with dfthresh().
 */
static void wthresh(unsigned int D, const long dims[D], float lambda, unsigned int flags, complex float* out, const complex float* in)
{
	long minsize[D];
	md_singleton_dims(D, minsize);

	// coarse scale of 16 in the first three (spatial) dimensions
	long coarse_scale[3] = MD_INIT_ARRAY(3, 16);
	md_copy_dims(3, minsize, coarse_scale);

	unsigned int wflags = 7; // FIXME

	// drop wavelet transform along spatial dims smaller than the minimum size
	for (unsigned int i = 0; i < 3; i++)
		if (dims[i] < minsize[i])
			wflags = MD_CLEAR(wflags, i);

	long strs[D];
	md_calc_strides(D, strs, dims, CFL_SIZE);

	const struct linop_s* w = linop_wavelet_create(D, wflags, dims, strs, minsize, false);
	const struct operator_p_s* p = prox_unithresh_create(D, w, lambda, flags);

	operator_p_apply(p, 1., D, dims, out, D, dims, in);

	operator_p_free(p);
}
/*
 * Locally-low-rank soft-thresholding: blocks of size llrblk are formed
 * over the dimensions NOT selected by flags and their singular values
 * are soft-thresholded with parameter lambda.
 */
static void lrthresh(unsigned int D, const long dims[D], int llrblk, float lambda, unsigned int flags, complex float* out, const complex float* in)
{
	long blkdims[MAX_LEV][D];

	// derive the block dimensions; the level count itself is unused here
	int levels = llr_blkdims(blkdims, ~flags, dims, llrblk);
	UNUSED(levels);

	const struct operator_p_s* thresh_op = lrthresh_create(dims, false, ~flags, (const long (*)[])blkdims, lambda, false, false);

	operator_p_apply(thresh_op, 1., D, dims, out, D, dims, in);

	operator_p_free(thresh_op);
}
/*
 * Divergence-free wavelet soft-thresholding along the flow dimension.
 * Requires exactly three flow encodings (dims[TE_DIM] == 3).
 */
static void dfthresh(unsigned int D, const long dims[D], float lambda, complex float* out, const complex float* in)
{
	long minsize[3];
	md_singleton_dims(3, minsize);

	// coarse scale of 16, clamped to the actual spatial dimensions
	long coarse_scale[3] = MD_INIT_ARRAY(3, 16);
	md_min_dims(3, ~0u, minsize, dims, coarse_scale);

	// unit voxel size in all three spatial directions
	complex float res[3] = { 1., 1., 1. };

	assert(3 == dims[TE_DIM]);

	const struct operator_p_s* thresh_op = prox_dfwavelet_create(dims, minsize, res, TE_DIM, lambda, false);

	operator_p_apply(thresh_op, 1., D, dims, out, D, dims, in);

	operator_p_free(thresh_op);
}
/*
 * Hard thresholding: keep entries whose complex magnitude exceeds lambda,
 * zero all others.
 *
 * Fixes two defects of the previous version:
 *  - it passed DIMS instead of the parameter D to md_calc_size();
 *  - it reinterpreted the complex data as interleaved floats and compared
 *    the SIGNED components to lambda, which zeroed every negative
 *    component regardless of magnitude and thresholded real/imaginary
 *    parts independently.  Hard thresholding must be based on |x|.
 */
static void hard_thresh(unsigned int D, const long dims[D], float lambda, complex float* out, const complex float* in)
{
	long size = md_calc_size(D, dims);

	#pragma omp parallel for
	for (long i = 0; i < size; i++)
		out[i] = (cabsf(in[i]) > lambda) ? in[i] : 0.;
}
// Command-line usage and help text for the 'threshold' tool.
static const char usage_str[] = "lambda <input> <output>";
static const char help_str[] = "Perform (soft) thresholding with parameter lambda.";


/*
 * Tool entry point: parses options, loads the input cfl, applies the
 * selected thresholding variant (default: element-wise soft threshold),
 * and writes the result.
 */
int main_threshold(int argc, char* argv[])
{
	unsigned int flags = 0;

	// thresholding variant, selected by mutually exclusive option flags
	enum th_type { NONE, WAV, LLR, DFW, MPDFW, HARD } th_type = NONE;
	int llrblk = 8;


	const struct opt_s opts[] = {

		OPT_SELECT('H', enum th_type, &th_type, HARD, "hard thresholding"),
		OPT_SELECT('W', enum th_type, &th_type, WAV, "daubechies wavelet soft-thresholding"),
		OPT_SELECT('L', enum th_type, &th_type, LLR, "locally low rank soft-thresholding"),
		OPT_SELECT('D', enum th_type, &th_type, DFW, "divergence-free wavelet soft-thresholding"),
		OPT_UINT('j', &flags, "bitmask", "joint soft-thresholding"),
		OPT_INT('b', &llrblk, "blocksize", "locally low rank block size"),
	};

	cmdline(&argc, argv, 3, 3, usage_str, help_str, ARRAY_SIZE(opts), opts);

	num_init();

	const int N = DIMS;
	long dims[N];

	// memory-mapped input; output is created with matching dimensions
	complex float* idata = load_cfl(argv[2], N, dims);
	complex float* odata = create_cfl(argv[3], N, dims);

	float lambda = atof(argv[1]);

	switch (th_type) {

	case WAV:
		wthresh(N, dims, lambda, flags, odata, idata);
		break;

	case LLR:
		lrthresh(N, dims, llrblk, lambda, flags, odata, idata);
		break;

	case DFW:
		dfthresh(N, dims, lambda, odata, idata);
		break;

	case HARD:
		hard_thresh(N, dims, lambda, odata, idata);
		break;

	default:
		// plain element-wise soft thresholding (optionally joint over 'flags')
		md_zsoftthresh(N, dims, lambda, flags, odata, idata);
	}

	unmap_cfl(N, dims, idata);
	unmap_cfl(N, dims, odata);

	return 0;
}
|
tensor_voting.h | #ifndef CVD_INC_TENSOR_VOTE_H
#define CVD_INC_TENSOR_VOTE_H
#include <cvd/image.h>
#include <TooN/TooN.h>
#include <TooN/helpers.h>
#include <vector>
#include <utility>
namespace CVD
{
#ifndef DOXYGEN_IGNORE_INTERNAL
namespace TensorVoting
{
/// One sample point of a precomputed tensor-voting kernel.
struct TV_coord
{
	std::ptrdiff_t o; ///< linear offset of (x, y) in image storage (presumably y*row_stride + x -- confirm against compute_a_tensor_kernel)
	int x;            ///< kernel-relative column offset
	int y;            ///< kernel-relative row offset
};
std::vector<std::pair<TV_coord, TooN::Matrix<2> > > compute_a_tensor_kernel(int radius, double cutoff, double angle, double sigma, double ratio, int row_stride);
// Map an angle r (radians) to a bin index in [0, num_divs), treating
// angles that differ by pi as equivalent (half-circle quantization).
// The +100 offset keeps the scaled value positive before rounding so
// that the int truncation and the modulo behave consistently for the
// (moderately) negative angles produced by atan/atan2.
inline unsigned int quantize_half_angle(double r, int num_divs)
{
	const double bins = (r / M_PI + 100) * num_divs;
	const int nearest = (int)floor(bins + 0.5);
	return nearest % num_divs;
}
}
#endif
/**
This function performs tensor voting on the gradients of an image. The
voting is performed densely at each pixel, and the contribution of each
pixel is scaled by its gradient magnitude. The kernel for voting is
computed as follows. Consider that there is a point at \f$(0,0)\f$, with
gradient normal \f$(0,1)\f$. This will make a contribution to the point
\f$(x,y)\f$.
The arc-length, \f$l\f$, of the arc passing through \f$(0,0)\f$, tangent to
the gradient at this point and also passing through \f$(x, y)\f$ is:
\f[
l = 2 r \theta
\f]
Where
\f[
\theta = \tan^{-1}\frac{y}{x}
\f]
and the radius of the arc, \f$r\f$ is:
\f[
r = \frac{x^2 + y^2}{2y}.
\f]
The scale of the contribution is:
\f[
s = e^{-\frac{l^2}{\sigma^2} - \kappa\frac{\sigma^2}{r^2}}.
\f]
Note that this is achieved by scaling \f$x\f$ and \f$y\f$ by \f$\sigma\f$, so
\f$\kappa\f$ controls the kernel shape independent of the size.
The complete tensor contribution is therefore:
\f[
e^{-\frac{l^2}{\sigma^2} - \kappa\frac{\sigma^2}{r^2}}
\left[
\begin{array}{c}
\cos 2\theta\\
\sin 2\theta
\end{array}
\right]
[ \cos 2\theta\ \ \sin 2\theta]
\f]
@param image The image on which to perform tensor voting
@param sigma \f$ \sigma \f$
@param ratio \f$ \kappa \f$
@param cutoff When \f$s\f$ points drop below the cutoff, it is set to zero.
@param num_divs The voting kernels are quantized by angle in to this many dicisions in the half-circle.
@ingroup gVision
**/
/// Dense tensor voting on image gradients (see the documentation block
/// above for the kernel definition). Produces one symmetric 2x2 tensor
/// per pixel; each pixel's vote is scaled by its gradient magnitude.
template<class C> Image<TooN::Matrix<2> > dense_tensor_vote_gradients(const BasicImage<C>& image, double sigma, double ratio, double cutoff=0.001, unsigned int num_divs = 4096)
{
using TooN::Matrix;
using std::pair;
using std::vector;
using TensorVoting::TV_coord;
Matrix<2> zero(TooN::Zeros);
Image<Matrix<2> > field(image.size(), zero);
//Kernel values go as exp(-x*x / sigma * sigma)
//So, for cutoff = exp(-x*x / sigma * sigma)
//ln cutoff = -x*x / sigma*sigma
//x = sigma * sqrt(-ln cutoff)
int kernel_radius = (int)ceil(sigma * sqrt(-log(cutoff)));
//First, build up the kernels
// One precomputed kernel per quantized tangent direction; each entry is a
// pointer offset (relative to the centre pixel) plus its 2x2 contribution.
vector<vector<pair<TV_coord, Matrix<2> > > > kernels;
for(unsigned int i=0; i < num_divs; i++)
{
double angle = M_PI * i / num_divs;
kernels.push_back(TensorVoting::compute_a_tensor_kernel(kernel_radius, cutoff, angle, sigma, ratio, field.row_stride()));
}
// Interior pass: every kernel offset stays inside the image here, so no
// per-vote bounds checks are needed.
for(int y= kernel_radius; y < field.size().y - kernel_radius; y++)
for(int x= kernel_radius; x < field.size().x - kernel_radius; x++)
{
// Central-difference gradient; scale = gradient magnitude.
double gx = ((double)image[y][x+1] - image[y][x-1])/2.;
double gy = ((double)image[y+1][x] - image[y-1][x])/2.;
double scale = sqrt(gx*gx + gy*gy);
// The +pi/2 rotates the gradient normal into the tangent direction.
unsigned int direction = TensorVoting::quantize_half_angle(M_PI/2 + atan2(gy,gx), num_divs);
const vector<pair<TV_coord, Matrix<2> > >& kernel = kernels[direction];
Matrix<2>* p = &field[y][x];
//The matrices are all symmetric, so only use the upper right triangle.
for(unsigned int i=0; i < kernel.size(); i++)
{
p[kernel[i].first.o][0][0] += kernel[i].second[0][0] * scale;
p[kernel[i].first.o][0][1] += kernel[i].second[0][1] * scale;
p[kernel[i].first.o][1][1] += kernel[i].second[1][1] * scale;
}
}
//Now do the edges
// Border pass: identical voting, but each vote is bounds-checked since
// kernel offsets may fall outside the image.
for(int y= 1; y < field.size().y-1; y++)
{
for(int x= 1; x < field.size().x-1; x++)
{
//Skip the middle bit
// (jump from the left border strip straight to the right border strip
// on rows already fully covered by the interior pass)
if(y >= kernel_radius && y < field.size().y - kernel_radius && x == kernel_radius)
x = field.size().x - kernel_radius;
double gx = ((double)image[y][x+1] - image[y][x-1])/2.;
double gy = ((double)image[y+1][x] - image[y-1][x])/2.;
double scale = sqrt(gx*gx + gy*gy);
// NOTE(review): this pass uses atan(gy / gx) while the interior pass
// uses atan2(gy, gx); for half-angle quantization the two agree modulo
// pi, but gx == 0 divides by zero here — confirm this is intentional.
unsigned int direction = TensorVoting::quantize_half_angle(M_PI/2 + atan(gy / gx), num_divs);
const vector<pair<TV_coord, Matrix<2> > >& kernel = kernels[direction];
Matrix<2>* p = &field[y][x];
//The matrices are all symmetric, so only use the upper right triangle.
for(unsigned int i=0; i < kernel.size(); i++)
{
if(kernel[i].first.y+y >= 0 && kernel[i].first.y+y < field.size().y && kernel[i].first.x+x >= 0 && kernel[i].first.x+x < field.size().x)
{
p[kernel[i].first.o][0][0] += kernel[i].second[0][0] * scale;
p[kernel[i].first.o][0][1] += kernel[i].second[0][1] * scale;
p[kernel[i].first.o][1][1] += kernel[i].second[1][1] * scale;
}
}
}
}
//Copy over bits to make the matrices symmetric
for(Image<Matrix<2> >:: iterator i=field.begin(); i != field.end(); i++)
(*i)[1][0] = (*i)[0][1];
return field;
}
#ifdef CVD_EXPERIMENTAL
/// SSE-accelerated variant of dense_tensor_vote_gradients. Each 2x2
/// tensor is packed into one __m128 so a vote is a single multiply-add.
/// Only the interior (pixels at least kernel_radius from the border) is
/// voted on; border pixels keep their zero initialization — unlike the
/// scalar version, there is no separate edge pass.
template<class C> Image<TooN::Matrix<2> > dense_tensor_vote_gradients_fast(const BasicImage<C>& image, double sigma, double ratio, double cutoff=0.001, int num_divs = 4096)
{
using TooN::Matrix;
using std::pair;
using std::make_pair;
using std::vector;
using TensorVoting::TV_coord;
Matrix<2> zero(TooN::Zeros);
Image<Matrix<2> > ffield(image.size(), zero);
Image<__m128> field(image.size());
field.zero();
//In much the same way as dense_tensor_vote_gradients, build up the kernel.
int kernel_radius = (int)ceil(sigma * sqrt(-log(cutoff)));
vector<vector<pair<TV_coord, Matrix<2> > > > matrix_kernels;
for(int i=0; i < num_divs; i++)
{
double angle = M_PI * i / num_divs;
matrix_kernels.push_back(TensorVoting::compute_a_tensor_kernel(kernel_radius, cutoff, angle, sigma, ratio, field.row_stride()));
}
//Put the kernel in aligned SSE registers.
//Image<__m128> is used since it guarantees SSE aligned memory.
// Split each kernel into a parallel offset array and a packed-value array
// so the hot loop is a flat gather/accumulate.
vector<vector<int> > kernel_offsets;
vector<Image<__m128> > kernel_values;
for(unsigned int i=0; i < matrix_kernels.size(); i++)
{
vector<int> off(matrix_kernels[i].size());
Image<__m128> val(ImageRef(matrix_kernels[i].size(), 1));
for(unsigned int j=0; j < matrix_kernels[i].size(); j++)
{
off[j] = matrix_kernels[i][j].first.o;
Matrix<2>& m = matrix_kernels[i][j].second;
// Pack the full 2x2 matrix row-major into one SSE register.
val.data()[j] = _mm_setr_ps(m[0][0], m[0][1], m[1][0], m[1][1]);
}
kernel_offsets.push_back(off);
kernel_values.push_back(val);
}
// NOTE(review): kernel offsets reach up to kernel_radius rows away, so
// threads working on nearby rows can accumulate into the same field
// pixels concurrently — this looks like a data race under the parallel
// for; verify whether that is tolerated here.
#pragma omp parallel for
for(int y= kernel_radius; y < field.size().y - kernel_radius; y++)
for(int x= kernel_radius; x < field.size().x - kernel_radius; x++)
{
float gx = ((float)image[y][x+1] - image[y][x-1])/2.;
float gy = ((float)image[y+1][x] - image[y-1][x])/2.;
float scale = sqrt(gx*gx + gy*gy);
unsigned int direction = TensorVoting::quantize_half_angle(M_PI/2 + atan(gy / gx), num_divs);
const vector<int> & off = kernel_offsets[direction];
__m128* val = kernel_values[direction].data();
__m128* p = &field[y][x];
__m128 s = _mm_set1_ps(scale);
// One packed multiply-add per kernel entry (all four matrix elements).
for(unsigned int i=0; i < off.size(); i++)
p[off[i]] = _mm_add_ps(p[off[i]], _mm_mul_ps(val[i], s));
}
// Unpack the accumulated __m128 values back into Matrix<2> form.
for(int y=0; y < field.size().y; y++)
for(int x=0; x < field.size().x; x++)
{
float f[4];
_mm_storeu_ps(f, field[y][x]);
ffield[y][x][0][0] = f[0];
ffield[y][x][0][1] = f[1];
ffield[y][x][1][0] = f[2];
ffield[y][x][1][1] = f[3];
}
return ffield;
}
#endif
}
#endif
|
aarch64_vfabi_NarrowestDataSize.c | // RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +neon -fopenmp -x c -emit-llvm %s -o - -femit-all-decls | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +neon -fopenmp-simd -x c -emit-llvm %s -o - -femit-all-decls | FileCheck %s
// REQUIRES: aarch64-registered-target
// Note: -fopemp and -fopenmp-simd behavior are expected to be the same.
// This test checks the values of Narrowest Data Size (NDS), as defined in
// https://github.com/ARM-software/abi-aa/tree/main/vfabia64
//
// NDS is used to compute the <vlen> token in the name of AdvSIMD
// vector functions when no `simdlen` is specified, with the rule:
//
// if NDS(f) = 1, then VLEN = 16, 8;
// if NDS(f) = 2, then VLEN = 8, 4;
// if NDS(f) = 4, then VLEN = 4, 2;
// if NDS(f) = 8 or NDS(f) = 16, then VLEN = 2.
// NDS(NDS_is_sizeof_char) = 1
#pragma omp declare simd notinbranch
char NDS_is_sizeof_char(short in);
// CHECK-DAG: _ZGVnN16v_NDS_is_sizeof_char
// CHECK-DAG: _ZGVnN8v_NDS_is_sizeof_char
// CHECK-NOT: _ZGV{{.*}}_NDS_is_sizeof_char
// NDS(NDS_is_sizeof_short) = 2
#pragma omp declare simd notinbranch
int NDS_is_sizeof_short(short in);
// CHECK-DAG: _ZGVnN8v_NDS_is_sizeof_short
// CHECK-DAG: _ZGVnN4v_NDS_is_sizeof_short
// CHECK-NOT: _ZGV{{.*}}_NDS_is_sizeof_short
// NDS(NDS_is_sizeof_float_with_linear) = 4, and not 2, because the pointers are
// marked as `linear` and therefore the size of the pointee realizes
// the NDS.
#pragma omp declare simd linear(sin) notinbranch
void NDS_is_sizeof_float_with_linear(double in, float *sin);
// Neon accepts only power of 2 values as <vlen>.
// CHECK-DAG: _ZGVnN4vl4_NDS_is_sizeof_float_with_linear
// CHECK-DAG: _ZGVnN2vl4_NDS_is_sizeof_float_with_linear
// CHECK-NOT: _ZGV{{.*}}_NDS_is_sizeof_float_with_linear
// NDS(NDS_is_size_of_float) = 4
#pragma omp declare simd notinbranch
double NDS_is_size_of_float(float in);
// CHECK-DAG: _ZGVnN4v_NDS_is_size_of_float
// CHECK-DAG: _ZGVnN2v_NDS_is_size_of_float
// CHECK-NOT: _ZGV{{.*}}_NDS_is_size_of_float
// NDS(NDS_is_sizeof_double) = 8
#pragma omp declare simd linear(sin) notinbranch
void NDS_is_sizeof_double(double in, double *sin);
// CHECK-DAG: _ZGVnN2vl8_NDS_is_sizeof_double
// CHECK-NOT: _ZGV{{.*}}_NDS_is_sizeof_double
// NDS(double_complex) = 16
#pragma omp declare simd notinbranch
double _Complex double_complex(double _Complex);
// CHECK-DAG: _ZGVnN2v_double_complex
// CHECK-NOT: _ZGV{{.*}}_double_complex
// NDS(double_complex_linear_char) = 1, becasue `x` is marked linear.
#pragma omp declare simd linear(x) notinbranch
double _Complex double_complex_linear_char(double _Complex y, char *x);
// CHECK-DAG: _ZGVnN8vl_double_complex_linear_char
// CHECK-DAG: _ZGVnN16vl_double_complex_linear_char
// CHECK-NOT: _ZGV{{.*}}_double_complex_linear_char
static float *F;
static double *D;
static short S;
static int I;
static char C;
static double _Complex DC;
// Call every "#pragma omp declare simd" function declared above so that
// -femit-all-decls forces the front end to emit the AdvSIMD vector
// variants whose mangled names the CHECK lines verify.
void do_something() {
C = NDS_is_sizeof_char(S);
I = NDS_is_sizeof_short(S);
NDS_is_sizeof_float_with_linear(*D, F);
*D = NDS_is_size_of_float(*F);
NDS_is_sizeof_double(*D, D);
DC = double_complex(DC);
DC = double_complex_linear_char(DC, &C);
}
|
stresslet_direct.c | #include "mex.h"
#include "math.h"
#define X prhs[0] // Source locations
#define F prhs[1] // Source strengths
#define U plhs[0] // Output
#ifndef VERBOSE
#define VERBOSE 0
#endif
#define PI 3.141592653589793
inline double dot(double * a,double * b)
{
return ( a[0]*b[0] + a[1]*b[1] + a[2]*b[2] );
}
/* no input checking is done */
/*
 * MEX gateway: direct (O(N^2)) free-space stresslet sum.
 *
 * Inputs (no validation performed, per the note above):
 *   X (prhs[0]) - N x 3 source locations
 *   F (prhs[1]) - N x 6 strengths: columns 1-3 = q, columns 4-6 = s
 * Output:
 *   U (plhs[0]) - N x 3 velocities
 *
 * Fix: the m == n self-interaction is now skipped BEFORE any arithmetic.
 * Previously 1.0/sqrt(0) was evaluated first (division by zero yielding
 * inf/NaN) and only then discarded by the continue — the result was
 * unaffected solely thanks to IEEE semantics, and the work was wasted.
 */
void mexFunction(int nlhs, mxArray *plhs[],
		 int nrhs, const mxArray *prhs[] )
{
    // input dims
    const int N = mxGetM(X);
    const double* restrict x = mxGetPr(X);
    const double* restrict f = mxGetPr(F);
    U = mxCreateDoubleMatrix(N, 3, mxREAL);
    double* restrict u = mxGetPr(U);
    if(VERBOSE)
	mexPrintf("[FS Stresslet Direct ] MEX N=%d ",N);
    // call kernel: each target point m accumulates contributions from all
    // sources n != m; rows of the parallel loop are independent.
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for(int m=0; m<N; m++)
    {
	double p[] = {0,0,0};
	for(int n = 0; n<N; n++){
	    if(m==n)  /* skip self-interaction before dividing by |r| = 0 */
		continue;
	    double q[] = {f[n], f[n+N], f[n+2*N]};
	    double s[] = {f[n+3*N], f[n+4*N], f[n+5*N]};
	    double r[] = {x[m]-x[n], x[m+N]-x[n+N],x[m+2*N]-x[n+2*N]};
	    double ri = 1.0/sqrt(dot(r,r));
	    double ri5 = ri*ri*ri*ri*ri;
	    double rdots = dot(r,s);
	    double rdotq = dot(r,q);
	    /* Stresslet kernel: -6 (r.s)(r.q) r / |r|^5 */
	    double c = -6.0* rdots * rdotq * ri5;
	    p[0] += c*r[0];
	    p[1] += c*r[1];
	    p[2] += c*r[2];
	}
	/* Output is stored column-major: u(:,1), u(:,2), u(:,3). */
	u[m    ] = p[0];
	u[m+  N] = p[1];
	u[m+2*N] = p[2];
    }
}
|
LogSoftMax.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/LogSoftMax.c"
#else
#ifdef _MSC_VER
#define LOG_SOFTMAX_SIZE_TYPE int64_t
#define LOG_SOFTMAX_CAST_TYPE (int64_t)
#else
#define LOG_SOFTMAX_SIZE_TYPE uint64_t
#define LOG_SOFTMAX_CAST_TYPE
#endif
/*
 * Forward pass: output = input - log(sum(exp(input))) along dimension dim.
 * The tensor is viewed as (outer_size, dim_size, inner_size) and each
 * (outer, inner) slice is reduced independently (parallelized with OpenMP).
 * Numerical stability comes from subtracting the slice maximum before
 * exponentiation and adding it back after the log.
 */
void THNN_(LogSoftMax_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
int64_t dim) {
/* NOTE(review): dim is int64_t but the message uses %d — a varargs
 * format mismatch on LP64; confirm THArgCheck's expectations. */
THArgCheck(dim >= 0 && dim < input->nDimension, 4,
"dim out of range (got %d, but input has %d dims)", dim, input->nDimension);
/* Collapse dimensions before/after dim into outer/inner extents. */
uint64_t outer_size = 1;
uint64_t dim_size = input->size[dim];
uint64_t inner_size = 1;
for (int64_t i = 0; i < dim; ++i)
outer_size *= input->size[i];
for (int64_t i = dim + 1; i < input->nDimension; ++i)
inner_size *= input->size[i];
input = THTensor_(newContiguous)(input);
THTensor_(resizeAs)(output, input);
real *input_data_base = THTensor_(data)(input);
real *output_data_base = THTensor_(data)(output);
uint64_t dim_stride = inner_size;
uint64_t outer_stride = dim_size * dim_stride;
LOG_SOFTMAX_SIZE_TYPE i, d;
#pragma omp parallel for private(i, d)
for (i = 0; i < LOG_SOFTMAX_CAST_TYPE (outer_size * inner_size); i++)
{
uint64_t outer_idx = i / inner_size;
uint64_t inner_idx = i % inner_size;
real *input_data = input_data_base + outer_idx * outer_stride + inner_idx;
real *output_data = output_data_base + outer_idx * outer_stride + inner_idx;
/* Pass 1: slice maximum (for stability). */
real max_input = -THInf;
for (d = 0; d < LOG_SOFTMAX_CAST_TYPE dim_size; d++)
max_input = THMax(max_input, input_data[d * dim_stride]);
/* Pass 2: log-sum-exp with the maximum factored out. */
accreal logsum = 0;
for (d = 0; d < LOG_SOFTMAX_CAST_TYPE dim_size; d++)
logsum += exp(input_data[d * dim_stride] - max_input);
logsum = max_input + log(logsum);
/* Pass 3: write the normalized log-probabilities. */
for (d = 0; d < LOG_SOFTMAX_CAST_TYPE dim_size; d++)
output_data[d * dim_stride] = input_data[d * dim_stride] - logsum;
}
THTensor_(free)(input);
}
/*
 * Backward pass: gradInput = gradOutput - exp(output) * sum(gradOutput),
 * with the sum taken along dimension dim of each (outer, inner) slice.
 * Uses the same (outer_size, dim_size, inner_size) factorization and
 * OpenMP parallelization as the forward pass.
 */
void THNN_(LogSoftMax_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
THTensor *output,
int64_t dim)
{
THNN_CHECK_SHAPE(output, gradOutput);
/* NOTE(review): same %d vs int64_t format mismatch as in the forward
 * pass — confirm against THArgCheck. */
THArgCheck(dim >= 0 && dim < output->nDimension, 6,
"dim out of range (got %d, but input has %d dims)", dim, output->nDimension);
/* Collapse dimensions before/after dim into outer/inner extents. */
uint64_t outer_size = 1;
uint64_t dim_size = output->size[dim];
uint64_t inner_size = 1;
for (int64_t i = 0; i < dim; ++i)
outer_size *= output->size[i];
for (int64_t i = dim + 1; i < output->nDimension; ++i)
inner_size *= output->size[i];
gradOutput = THTensor_(newContiguous)(gradOutput);
output = THTensor_(newContiguous)(output);
THTensor_(resizeAs)(gradInput, output);
real *gradInput_data_base = THTensor_(data)(gradInput);
real *output_data_base = THTensor_(data)(output);
real *gradOutput_data_base = THTensor_(data)(gradOutput);
uint64_t dim_stride = inner_size;
uint64_t outer_stride = dim_size * dim_stride;
LOG_SOFTMAX_SIZE_TYPE i, d;
#pragma omp parallel for private(i, d)
for (i = 0; i < LOG_SOFTMAX_CAST_TYPE (outer_size * inner_size); i++)
{
uint64_t outer_idx = i / inner_size;
uint64_t inner_idx = i % inner_size;
real *gradInput_data = gradInput_data_base + outer_idx * outer_stride + inner_idx;
real *output_data = output_data_base + outer_idx * outer_stride + inner_idx;
real *gradOutput_data = gradOutput_data_base + outer_idx * outer_stride + inner_idx;
/* Pass 1: sum of incoming gradients over the slice. */
accreal sum = 0;
for (d = 0; d < LOG_SOFTMAX_CAST_TYPE dim_size; d++)
sum += gradOutput_data[d * dim_stride];
/* Pass 2: gI = gO - softmax(output) * sum (exp(output) is softmax,
 * since output already holds log-probabilities). */
for (d = 0; d < LOG_SOFTMAX_CAST_TYPE dim_size; d++)
gradInput_data[d * dim_stride] = gradOutput_data[d * dim_stride] - exp(output_data[d * dim_stride]) * sum;
}
THTensor_(free)(gradOutput);
THTensor_(free)(output);
}
#endif
|
generator_spgemm_csc_asparse.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/libxsmm/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
/**
* @file
* This file is part of GemmCodeGenerator.
*
* @author Alexander Heinecke (alexander.heinecke AT mytum.de, http://www5.in.tum.de/wiki/index.php/Alexander_Heinecke,_M.Sc.,_M.Sc._with_honors)
*
* @section LICENSE
* Copyright (c) 2012-2014, Technische Universitaet Muenchen
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* @section DESCRIPTION
* <DESCRIPTION>
*/
#include "generator_spgemm_csc_asparse.h"
#include "generator_common.h"
#include "libxsmm_main.h"
/*
 * Code generator: emits C source (appended as strings to
 * io_generated_code) for ONE scalar multiply-accumulate of the sparse-A
 * CSC SpGEMM kernel: C[l_n*ldc + row] += A[nz] * b<i_k>, where the
 * broadcast register b<i_k> is emitted by the caller. The emitted code
 * itself selects SSE-only vs. AVX variants via preprocessor guards.
 * Any byte change to the format strings changes the generated kernel.
 */
LIBXSMM_API_INTERN
void libxsmm_sparse_csc_asparse_innerloop_scalar( libxsmm_generated_code* io_generated_code,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_k,
const unsigned int i_z,
const unsigned int* i_row_idx,
const unsigned int* i_column_idx ) {
char l_new_code[512];
int l_max_code_length = 511;
int l_code_length = 0;
if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
/* FP64: load one C element and one A element as scalars. */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    __m128d c%u_%u = _mm_load_sd(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    __m128d a%u_%u = _mm_load_sd(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
/* AVX build: the broadcast b is a 256-bit register, so cast down. */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    c%u_%u = _mm_add_sd(c%u_%u, _mm_mul_sd(a%u_%u, _mm256_castpd256_pd128(b%u)));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
/* SSE-only build: b is already a 128-bit register. */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    c%u_%u = _mm_add_sd(c%u_%u, _mm_mul_sd(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    _mm_store_sd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
} else {
/* FP32: same pattern with single-precision scalar intrinsics; the
 * broadcast b is 128-bit in both SSE and AVX builds, so no cast. */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    __m128 c%u_%u = _mm_load_ss(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    __m128 a%u_%u = _mm_load_ss(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    c%u_%u = _mm_add_ss(c%u_%u, _mm_mul_ss(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    _mm_store_ss(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
}
/*
 * Code generator: emits C source for a 2-wide vector multiply-accumulate
 * of the sparse-A CSC SpGEMM kernel (two consecutive rows of C updated at
 * once). Callers invoke this when two adjacent non-zeros in A hit
 * consecutive C rows. The emitted strings must stay byte-exact — they
 * ARE the generated kernel.
 */
LIBXSMM_API_INTERN
void libxsmm_sparse_csc_asparse_innerloop_two_vector( libxsmm_generated_code* io_generated_code,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_k,
const unsigned int i_z,
const unsigned int* i_row_idx,
const unsigned int* i_column_idx ) {
char l_new_code[512];
int l_max_code_length = 511;
int l_code_length = 0;
if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
/* FP64: 128-bit unaligned load covers two doubles. */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    __m128d c%u_%u = _mm_loadu_pd(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    __m128d a%u_%u = _mm_loadu_pd(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
/* AVX build: cast the 256-bit broadcast b down to 128 bits. */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    c%u_%u = _mm_add_pd(c%u_%u, _mm_mul_pd(a%u_%u, _mm256_castpd256_pd128(b%u)));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    c%u_%u = _mm_add_pd(c%u_%u, _mm_mul_pd(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    _mm_storeu_pd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
} else {
/* FP32: two floats are moved as one 64-bit lane via double load/store
 * plus bit-preserving casts (no float<->double conversion occurs). */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    __m128 c%u_%u = _mm_castpd_ps(_mm_load_sd((const double*)&C[(l_n*%u)+%u]));\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    __m128 a%u_%u = _mm_castpd_ps(_mm_load_sd((const double*)&A[%u]));\n", i_k, i_z, i_column_idx[i_k] + i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    c%u_%u = _mm_add_ps(c%u_%u, _mm_mul_ps(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    _mm_store_sd((double*)&C[(l_n*%u)+%u], _mm_castps_pd(c%u_%u));\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
}
/*
 * Code generator: emits C source for a 4-wide vector multiply-accumulate
 * of the sparse-A CSC SpGEMM kernel (four consecutive rows of C at once).
 * FP64 uses one 256-bit AVX op, or two 128-bit SSE ops on SSE-only
 * builds; FP32 fits the four values into a single 128-bit op.
 * The emitted strings must stay byte-exact — they ARE the kernel.
 */
LIBXSMM_API_INTERN
void libxsmm_sparse_csc_asparse_innerloop_four_vector( libxsmm_generated_code* io_generated_code,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_k,
const unsigned int i_z,
const unsigned int* i_row_idx,
const unsigned int* i_column_idx ) {
char l_new_code[512];
int l_max_code_length = 511;
int l_code_length = 0;
if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
unsigned int l_i;
unsigned int l_z = i_z;
/* AVX build: one 256-bit load/mul/add/store covers all four doubles. */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    __m256d c%u_%u = _mm256_loadu_pd(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    __m256d a%u_%u = _mm256_loadu_pd(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    c%u_%u = _mm256_add_pd(c%u_%u, _mm256_mul_pd(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    _mm256_storeu_pd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
/* SSE-only build: split into two 128-bit halves (l_z = i_z, i_z+2). */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
for ( l_i = 0; l_i < 2; l_i++ ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    __m128d c%u_%u = _mm_loadu_pd(&C[(l_n*%u)+%u]);\n", i_k, l_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + l_z] );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    __m128d a%u_%u = _mm_loadu_pd(&A[%u]);\n", i_k, l_z, i_column_idx[i_k] + l_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    c%u_%u = _mm_add_pd(c%u_%u, _mm_mul_pd(a%u_%u, b%u));\n", i_k, l_z, i_k, l_z, i_k, l_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    _mm_storeu_pd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + l_z], i_k, l_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_z += 2;
}
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
} else {
/* FP32: four floats fit in one 128-bit op on both SSE and AVX builds. */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    __m128 c%u_%u = _mm_loadu_ps(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    __m128 a%u_%u = _mm_loadu_ps(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    c%u_%u = _mm_add_ps(c%u_%u, _mm_mul_ps(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    _mm_storeu_ps(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
}
LIBXSMM_API_INTERN
/* Emits C source text (appended to io_generated_code) implementing a fully
 * unrolled sparse-A times dense-B GEMM, where A is supplied in CSC layout via
 * i_row_idx / i_column_idx.  The generated kernel loops over the n columns of
 * C, optionally zeroes each column (beta == 0), and per column of A emits
 * AVX/SSE3 intrinsic code guarded by preprocessor checks, with a plain-C
 * fallback branch.  A flop counter for the generated code is appended at the
 * end (debug builds only).  i_arch and i_values are part of the common
 * generator interface but unused by this variant. */
void libxsmm_generator_spgemm_csc_asparse( libxsmm_generated_code* io_generated_code,
                                           const libxsmm_gemm_descriptor* i_xgemm_desc,
                                           const char* i_arch,
                                           const unsigned int* i_row_idx,
                                           const unsigned int* i_column_idx,
                                           const double* i_values ) {
  char l_new_code[512];
  /* 511, not 512: leave room for the terminating NUL written by snprintf */
  int l_max_code_length = 511;
  int l_code_length = 0;
  unsigned int l_k;
  /* flops performed by the generated kernel per column of C */
  unsigned int l_flop_count = 0;

  LIBXSMM_UNUSED(i_arch);
  LIBXSMM_UNUSED(i_values);

  /* loop over columns in C in generated code, we fully unroll inside each column */
  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_n = 0;\n #pragma nounroll_and_jam\n for ( l_n = 0; l_n < %u; l_n++) {\n", (unsigned int)i_xgemm_desc->n);
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );

  /* reset the current column in C if needed (beta == 0 requested) */
  if (0 != (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=0 */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_m = 0;\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    /* only worth asking the compiler to vectorize if there is more than one row */
    if ( i_xgemm_desc->m > 1 ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    }
    /* zero literal matches the element type (0.0 for F64, 0.0f otherwise) */
    if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_m = 0; l_m < %u; l_m++) {\n C[(l_n*%u)+l_m] = 0.0;\n }\n", (unsigned int)i_xgemm_desc->m, (unsigned int)i_xgemm_desc->ldc);
    } else {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_m = 0; l_m < %u; l_m++) {\n C[(l_n*%u)+l_m] = 0.0f;\n }\n", (unsigned int)i_xgemm_desc->m, (unsigned int)i_xgemm_desc->ldc);
    }
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }

  assert(0 != i_column_idx);
  /* loop over columns in A, rows in B and fully unroll */
  for ( l_k = 0; l_k < (unsigned int)i_xgemm_desc->k; l_k++ ) {
    /* CSC: number of stored entries in column l_k of A */
    unsigned int l_column_elements = i_column_idx[l_k + 1] - i_column_idx[l_k];
    unsigned int l_z = 0;

    /* the intrinsic path of the generated code is guarded at its compile time */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) || defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );

    /* broadcast the single B element shared by all entries of this column of A;
     * the AVX and SSE3-only flavours are both emitted, selected by the guards */
    if ( l_column_elements > 0 ) {
      if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
        l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n __m256d b%u = _mm256_broadcast_sd(&B[(l_n*%u)+%u]);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k);
        libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
        l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n __m128d b%u = _mm_loaddup_pd(&B[(l_n*%u)+%u]);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k);
        libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      } else {
        l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n __m128 b%u = _mm_broadcast_ss(&B[(l_n*%u)+%u]);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k);
        libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
        l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n __m128 b%u = _mm_load_ss(&B[(l_n*%u)+%u]); b%u = _mm_shuffle_ps(b%u, b%u, 0x00);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k, l_k, l_k, l_k);
        libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      }
    }

    /* loop over the columns of A and look for vectorization potential:
     * runs of 4 (resp. 2) consecutive row indices allow 256-bit (resp.
     * 128-bit) vector stores into C; anything else is emitted scalar.
     * Entries whose row index falls outside m are silently dropped. */
    for ( l_z = 0; l_z < l_column_elements; l_z++ ) {
      assert(0 != i_row_idx);
      /* 4 element vector might be possible */
      if ( (l_z < (l_column_elements - 3)) && (l_column_elements > 3) ) {
        /* check for 256bit vector instruction */
        if ((i_row_idx[i_column_idx[l_k] + l_z] + 1 == i_row_idx[i_column_idx[l_k] + l_z + 1]) &&
            (i_row_idx[i_column_idx[l_k] + l_z] + 2 == i_row_idx[i_column_idx[l_k] + l_z + 2]) &&
            (i_row_idx[i_column_idx[l_k] + l_z] + 3 == i_row_idx[i_column_idx[l_k] + l_z + 3]) &&
            (i_row_idx[i_column_idx[l_k] + l_z + 3] < (unsigned int)i_xgemm_desc->m)) {
          libxsmm_sparse_csc_asparse_innerloop_four_vector(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
          l_z += 3; /* consumed 4 entries */
        /* check for 128bit vector instruction */
        } else if ((i_row_idx[i_column_idx[l_k] + l_z] + 1 == i_row_idx[i_column_idx[l_k] + l_z + 1]) &&
                   (i_row_idx[i_column_idx[l_k] + l_z + 1] < (unsigned int)i_xgemm_desc->m) ) {
          libxsmm_sparse_csc_asparse_innerloop_two_vector(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
          l_z++; /* consumed 2 entries */
        /* scalar instruction */
        } else {
          if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) {
            libxsmm_sparse_csc_asparse_innerloop_scalar(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
          }
        }
      /* 2 element vector might be possible */
      } else if ( (l_z < (l_column_elements - 1)) && (l_column_elements > 1)) {
        /* check for 128bit vector instruction */
        if ((i_row_idx[i_column_idx[l_k] + l_z] + 1 == i_row_idx[i_column_idx[l_k] + l_z + 1]) &&
            (i_row_idx[i_column_idx[l_k] + l_z + 1] < (unsigned int)i_xgemm_desc->m) ) {
          libxsmm_sparse_csc_asparse_innerloop_two_vector(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
          l_z++;
        /* scalar instruction */
        } else {
          if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) {
            libxsmm_sparse_csc_asparse_innerloop_scalar(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
          }
        }
      /* scalar anyways */
      } else {
        if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) {
          libxsmm_sparse_csc_asparse_innerloop_scalar(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
        }
      }
    }

    /* C fallback code (compiled when neither SSE3 nor AVX is available) */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#else\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );

    /* loop over the columns of A; one fused multiply-add per stored entry */
    for ( l_z = 0; l_z < l_column_elements; l_z++ ) {
      if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) {
        l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " C[(l_n*%u)+%u] += A[%u] * B[(l_n*%u)+%u];\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[l_k] + l_z], i_column_idx[l_k] + l_z, (unsigned int)i_xgemm_desc->ldb, l_k );
        libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
        l_flop_count += 2; /* one mul + one add */
      }
    }

    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }

  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " }\n");
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );

  /* add flop counter (debug builds only; the per-column count is scaled by n)
   * NOTE(review): l_flop_count is only tallied in the C fallback branch, so
   * the reported flops presumably match the intrinsic path too -- confirm. */
  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n#ifndef NDEBUG\n#ifdef _OPENMP\n#pragma omp atomic\n#endif\nlibxsmm_num_total_flops += %u;\n#endif\n", l_flop_count * i_xgemm_desc->n);
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
|
pooling_3x3_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
/* 3x3-kernel, stride-2 max pooling over pack-4 data (each "pixel" is 4
 * contiguous floats).  Output pixel (i, j) is the lane-wise max over the
 * 3x3 window of input pixels starting at row 2*i, column 2*j.  Assumes the
 * caller has already padded/cropped bottom_blob so that every window is in
 * bounds.  Channels are processed independently in parallel. */
static void pooling3x3s2_max_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    /* after 2*outw input columns of a row pair are consumed, skip the unread
     * remainder of that row plus one full row (stride 2 descends two input
     * rows); x4 because every pack-4 pixel is 4 floats */
    const int tailstep = (w - 2 * outw + w) * 4;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        float* outptr = top_blob.channel(q);

        /* the three input rows feeding the current output row */
        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);

        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            /* fast path: 4 output pixels per iteration, reading 9 input
             * columns (v0..v8) per row; r0/r1/r2 each advance by 8 columns
             * (the 9th is re-read as column 0 of the next group) */
            for (; j + 3 < outw; j += 4)
            {
#if __aarch64__
                /* per row: v16..v19 = pairwise column maxima, v20..v23 = the
                 * four 3-column window maxima; rows are then folded together */
                asm volatile(
                    "prfm pldl1keep, [%1, #512] \n"
                    "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
                    "fmax v16.4s, v0.4s, v1.4s \n"
                    "fmax v17.4s, v2.4s, v3.4s \n"
                    "prfm pldl1keep, [%1, #512] \n"
                    "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
                    "fmax v18.4s, v4.4s, v5.4s \n"
                    "fmax v19.4s, v6.4s, v7.4s \n"
                    "ld1 {v8.4s}, [%1] \n"
                    "fmax v20.4s, v16.4s, v2.4s \n"
                    "fmax v21.4s, v17.4s, v4.4s \n"
                    "prfm pldl1keep, [%2, #512] \n"
                    "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
                    "fmax v22.4s, v18.4s, v6.4s \n"
                    "fmax v23.4s, v19.4s, v8.4s \n"
                    "prfm pldl1keep, [%2, #512] \n"
                    "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n"
                    "fmax v16.4s, v0.4s, v1.4s \n"
                    "fmax v17.4s, v2.4s, v3.4s \n"
                    "fmax v18.4s, v4.4s, v5.4s \n"
                    "fmax v19.4s, v6.4s, v7.4s \n"
                    "ld1 {v8.4s}, [%2] \n"
                    "fmax v24.4s, v16.4s, v2.4s \n"
                    "fmax v25.4s, v17.4s, v4.4s \n"
                    "prfm pldl1keep, [%3, #512] \n"
                    "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
                    "fmax v26.4s, v18.4s, v6.4s \n"
                    "fmax v27.4s, v19.4s, v8.4s \n"
                    "prfm pldl1keep, [%3, #512] \n"
                    "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n"
                    "fmax v16.4s, v0.4s, v1.4s \n"
                    "fmax v17.4s, v2.4s, v3.4s \n"
                    "fmax v18.4s, v4.4s, v5.4s \n"
                    "fmax v19.4s, v6.4s, v7.4s \n"
                    "ld1 {v8.4s}, [%3] \n"
                    "fmax v28.4s, v16.4s, v2.4s \n"
                    "fmax v29.4s, v17.4s, v4.4s \n"
                    "fmax v30.4s, v18.4s, v6.4s \n"
                    "fmax v31.4s, v19.4s, v8.4s \n"
                    "fmax v20.4s, v20.4s, v24.4s \n"
                    "fmax v21.4s, v21.4s, v25.4s \n"
                    "fmax v22.4s, v22.4s, v26.4s \n"
                    "fmax v23.4s, v23.4s, v27.4s \n"
                    "fmax v20.4s, v20.4s, v28.4s \n"
                    "fmax v21.4s, v21.4s, v29.4s \n"
                    "fmax v22.4s, v22.4s, v30.4s \n"
                    "fmax v23.4s, v23.4s, v31.4s \n"
                    "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n"
                    : "=r"(outptr), // %0
                    "=r"(r0),     // %1
                    "=r"(r1),     // %2
                    "=r"(r2)      // %3
                    : "0"(outptr),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2)
                    : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
#else // __aarch64__
                /* armv7 variant of the same 4-output computation; q registers
                 * hold two input columns each (vldm loads 4 pack-4 pixels) */
                asm volatile(
                    "pld [%1, #512] \n"
                    "vldm %1!, {d0-d7} \n"
                    "pld [%2, #512] \n"
                    "vldm %2!, {d8-d15} \n"
                    "vmax.f32 q0, q0, q4 \n"
                    "vmax.f32 q1, q1, q5 \n"
                    "pld [%3, #512] \n"
                    "vldm %3!, {d16-d23} \n"
                    "vmax.f32 q2, q2, q6 \n"
                    "vmax.f32 q3, q3, q7 \n"
                    "vmax.f32 q0, q0, q8 \n"
                    "vmax.f32 q1, q1, q9 \n"
                    "pld [%1, #512] \n"
                    "vldm %1!, {d8-d15} \n"
                    "vmax.f32 q2, q2, q10 \n"
                    "vmax.f32 q3, q3, q11 \n"
                    "pld [%2, #512] \n"
                    "vldm %2!, {d16-d23} \n"
                    "vmax.f32 q4, q4, q8 \n"
                    "vmax.f32 q5, q5, q9 \n"
                    "pld [%3, #512] \n"
                    "vldm %3!, {d24-d31} \n"
                    "vmax.f32 q6, q6, q10 \n"
                    "vmax.f32 q7, q7, q11 \n"
                    "vmax.f32 q4, q4, q12 \n"
                    "vmax.f32 q5, q5, q13 \n"
                    "vld1.f32 {d24-d25}, [%1 :128] \n"
                    "vld1.f32 {d26-d27}, [%2 :128] \n"
                    "vmax.f32 q6, q6, q14 \n"
                    "vmax.f32 q7, q7, q15 \n"
                    "vld1.f32 {d28-d29}, [%3 :128] \n"
                    "vmax.f32 q8, q12, q13 \n"
                    "vmax.f32 q8, q8, q14 \n"
                    "vmax.f32 q12, q0, q1 \n"
                    "vmax.f32 q13, q2, q3 \n"
                    "vmax.f32 q14, q4, q5 \n"
                    "vmax.f32 q15, q6, q7 \n"
                    "vmax.f32 q12, q12, q2 \n"
                    "vmax.f32 q13, q13, q4 \n"
                    "vmax.f32 q14, q14, q6 \n"
                    "vmax.f32 q15, q15, q8 \n"
                    "vstm %0!, {d24-d31} \n"
                    : "=r"(outptr), // %0
                    "=r"(r0),     // %1
                    "=r"(r1),     // %2
                    "=r"(r2)      // %3
                    : "0"(outptr),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2)
                    : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
            }
            /* 2 output pixels per iteration: reads 5 input columns per row */
            for (; j + 1 < outw; j += 2)
            {
#if __aarch64__
                asm volatile(
                    "prfm pldl1keep, [%1, #512] \n"
                    "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
                    "prfm pldl1keep, [%2, #512] \n"
                    "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n"
                    "fmax v16.4s, v0.4s, v4.4s \n"
                    "fmax v17.4s, v1.4s, v5.4s \n"
                    "prfm pldl1keep, [%3, #512] \n"
                    "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%3], #64 \n"
                    "fmax v18.4s, v2.4s, v6.4s \n"
                    "fmax v19.4s, v3.4s, v7.4s \n"
                    "ld1 {v0.4s}, [%1] \n"
                    "fmax v16.4s, v16.4s, v20.4s \n"
                    "fmax v17.4s, v17.4s, v21.4s \n"
                    "ld1 {v1.4s}, [%2] \n"
                    "fmax v18.4s, v18.4s, v22.4s \n"
                    "fmax v19.4s, v19.4s, v23.4s \n"
                    "ld1 {v2.4s}, [%3] \n"
                    "fmax v3.4s, v0.4s, v1.4s \n"
                    "fmax v20.4s, v16.4s, v17.4s \n"
                    "fmax v21.4s, v18.4s, v19.4s \n"
                    "fmax v3.4s, v3.4s, v2.4s \n"
                    "fmax v20.4s, v20.4s, v18.4s \n"
                    "fmax v21.4s, v21.4s, v3.4s \n"
                    "st1 {v20.4s, v21.4s}, [%0], #32 \n"
                    : "=r"(outptr), // %0
                    "=r"(r0),     // %1
                    "=r"(r1),     // %2
                    "=r"(r2)      // %3
                    : "0"(outptr),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2)
                    : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else // __aarch64__
                asm volatile(
                    "pld [%1, #512] \n"
                    "vldm %1!, {d0-d7} \n"
                    "pld [%2, #512] \n"
                    "vldm %2!, {d8-d15} \n"
                    "vmax.f32 q12, q0, q4 \n"
                    "vmax.f32 q13, q1, q5 \n"
                    "pld [%3, #512] \n"
                    "vldm %3!, {d16-d23} \n"
                    "vmax.f32 q14, q2, q6 \n"
                    "vmax.f32 q15, q3, q7 \n"
                    "vld1.f32 {d0-d1}, [%1 :128] \n"
                    "vmax.f32 q12, q12, q8 \n"
                    "vmax.f32 q13, q13, q9 \n"
                    "vld1.f32 {d2-d3}, [%2 :128] \n"
                    "vmax.f32 q14, q14, q10 \n"
                    "vmax.f32 q15, q15, q11 \n"
                    "vld1.f32 {d4-d5}, [%3 :128] \n"
                    "vmax.f32 q3, q0, q1 \n"
                    "vmax.f32 q4, q12, q13 \n"
                    "vmax.f32 q5, q14, q15 \n"
                    "vmax.f32 q3, q3, q2 \n"
                    "vmax.f32 q4, q4, q14 \n"
                    "vmax.f32 q5, q5, q3 \n"
                    "vst1.f32 {d8-d11}, [%0 :128]! \n"
                    : "=r"(outptr), // %0
                    "=r"(r0),     // %1
                    "=r"(r1),     // %2
                    "=r"(r2)      // %3
                    : "0"(outptr),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2)
                    : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
            }
            /* scalar (intrinsics) tail: one output pixel = lane-wise max of
             * the 3x3 window; pointers advance by 2 columns (8 floats) */
            for (; j < outw; j++)
            {
                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r01 = vld1q_f32(r0 + 4);
                float32x4_t _r02 = vld1q_f32(r0 + 8);
                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r11 = vld1q_f32(r1 + 4);
                float32x4_t _r12 = vld1q_f32(r1 + 8);
                float32x4_t _r20 = vld1q_f32(r2);
                float32x4_t _r21 = vld1q_f32(r2 + 4);
                float32x4_t _r22 = vld1q_f32(r2 + 8);

                float32x4_t _max0 = vmaxq_f32(vmaxq_f32(_r00, _r01), _r02);
                float32x4_t _max1 = vmaxq_f32(vmaxq_f32(_r10, _r11), _r12);
                float32x4_t _max2 = vmaxq_f32(vmaxq_f32(_r20, _r21), _r22);

                float32x4_t _max = vmaxq_f32(vmaxq_f32(_max0, _max1), _max2);

                vst1q_f32(outptr, _max);

                r0 += 8;
                r1 += 8;
                r2 += 8;
                outptr += 4;
            }

            /* jump all three row pointers to the next output row's window */
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
|
StressCPU.c | #include <stdio.h>
#include <omp.h>
/* CPU stress tool: repeatedly counts primes below `threshold` using naive
 * trial division under an OpenMP parallel-for.  The outer while(1) never
 * terminates -- presumably intentional for a stress/burn-in utility
 * (TODO confirm), which makes the printf/return below unreachable and lets
 * totalPrimes accumulate across passes. */
int main() {
    int current = 1;          /* loop counter; implicitly privatised by the omp for */
    int totalPrimes = 0;      /* reduction target, summed across threads */
    int threshold = 1000000;
    while(1) {
    /* dynamic schedule balances the uneven per-iteration cost (larger
     * candidates take longer to trial-divide) */
    #pragma omp parallel for schedule(dynamic) reduction(+ : totalPrimes)
    for (current = 1; current <= threshold; current++) {
      int i = 2;
      /* trial division: first divisor found breaks out */
      while(i <= current) {
        if(current % i == 0)
          break;
        i++;
      }
      /* only a prime reaches i == current (note: current == 1 is correctly
       * excluded, since the while loop never runs and i stays 2) */
      if(i == current)
        totalPrimes++;
    }
    }
    printf("%d prime numbers under %d\n",totalPrimes,threshold);
    return 0;
}
|
pooling2x2s2_max_neon.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{

/* 2x2-kernel, stride-2 max pooling for plain (non-packed) float data.
 * Output pixel (i, j) = max of the 2x2 input block at (2*i, 2*j); each
 * channel is processed independently in parallel.  Assumes bottom_blob is
 * sized so every 2x2 window is in bounds. */
static void pooling2x2s2_max_neon(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    /* skip the unconsumed remainder of the current row plus one full row
     * (stride 2 descends two input rows per output row) */
    const int tailstep = w - 2*outw + w;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q=0; q<inch; q++)
    {
        const float* img0 = bottom_blob.channel(q);
        float* outptr = top_blob.channel(q);

        /* the two input rows feeding the current output row */
        const float* r0 = img0;
        const float* r1 = img0 + w;

        for (int i = 0; i < outh; i++)
        {
#if __ARM_NEON
            int nn = outw >> 2;             /* 4 outputs per NEON iteration */
            int remain = outw - (nn << 2);  /* scalar leftover */
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            /* load 8 floats per row, max across rows, then pairwise max
             * (fmaxp) collapses each horizontal pair -> 4 outputs */
            if (nn > 0)
            {
            asm volatile(
                "0: \n"
                "prfm pldl1keep, [%1, #256] \n"
                "prfm pldl1keep, [%2, #256] \n"
                "ld1 {v0.4s, v1.4s}, [%1], #32 \n"
                "ld1 {v2.4s, v3.4s}, [%2], #32 \n"
                "fmax v0.4s, v0.4s, v2.4s \n"
                "fmax v1.4s, v1.4s, v3.4s \n"
                "fmaxp v2.4s, v0.4s, v1.4s \n"
                "subs %w0, %w0, #1 \n"
                "st1 {v2.4s}, [%3], #16 \n"
                "bne 0b \n"
                : "=r"(nn),     // %0
                "=r"(r0),     // %1
                "=r"(r1),     // %2
                "=r"(outptr)  // %3
                : "0"(nn),
                "1"(r0),
                "2"(r1),
                "3"(outptr)
                : "cc", "memory", "v0", "v1", "v2", "v3"
            );
            }
#else
            /* armv7 equivalent: vpmax performs the horizontal pairwise max */
            if (nn > 0)
            {
            asm volatile(
                "0: \n"
                "pld [%1, #256] \n"
                "pld [%2, #256] \n"
                "vld1.f32 {d0-d3}, [%1]! \n"
                "vld1.f32 {d4-d7}, [%2]! \n"
                "vmax.f32 q0, q0, q2 \n"
                "vmax.f32 q1, q1, q3 \n"
                "vpmax.f32 d4, d0, d1 \n"
                "vpmax.f32 d5, d2, d3 \n"
                "subs %0, #1 \n"
                "vst1.f32 {d4-d5}, [%3]! \n"
                "bne 0b \n"
                : "=r"(nn),     // %0
                "=r"(r0),     // %1
                "=r"(r1),     // %2
                "=r"(outptr)  // %3
                : "0"(nn),
                "1"(r0),
                "2"(r1),
                "3"(outptr)
                : "cc", "memory", "q0", "q1", "q2", "q3"
            );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            /* scalar tail: max of the 2x2 block, window advances 2 columns */
            for (; remain>0; remain--)
            {
                float max0 = std::max(r0[0], r0[1]);
                float max1 = std::max(r1[0], r1[1]);

                *outptr = std::max(max0, max1);

                r0 += 2;
                r1 += 2;
                outptr++;
            }

            r0 += tailstep;
            r1 += tailstep;
        }
    }
}
}
|
GB_unop__cos_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__cos_fc64_fc64
// op(A') function: GB_unop_tran__cos_fc64_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = ccos (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ccos (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = ccos (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_COS || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__cos_fc64_fc64
(
    GxB_FC64_t *Cx,         // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // elementwise parallel pass over all anz entries; the FC64 -> FC64 cast
    // is the identity, so the complex cosine is applied to Ax [p] directly
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = ccos (Ax [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__cos_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* the transpose kernel is shared: GB_unop_transpose.c expands here using
     * the GB_* macros defined above (GB_CAST_OP applies ccos per entry) */
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
verlet_accelerate.c |
#include "verlet.h"
#include "cloud.h"
/* Velocity update of a velocity-Verlet step: for every particle i, accumulate
 * the pairwise contribution from every other particle j (computed by force())
 * and add the total to the velocity arrays U.  X and U hold one array per
 * spatial dimension.  Interface unchanged. */
void
verlet_step_accelerate (int Np, double dt, const double *restrict X[3], double *restrict U[3])
{
#pragma omp parallel for
  for (int i = 0; i < Np; i++) {
    double accum[3] = {0., 0., 0.};
    for (int j = 0; j < Np; j++) {
      double contrib[3];
      if (j == i)
        continue;               /* no self-interaction */
      force (dt, X[0][i], X[1][i], X[2][i], X[0][j], X[1][j], X[2][j], contrib);
      accum[0] += contrib[0];
      accum[1] += contrib[1];
      accum[2] += contrib[2];
    }
    /* only U[..][i] is written from iteration i, so the parallel loop over
       particles is race-free */
    for (int d = 0; d < 3; d++)
      U[d][i] += accum[d];
  }
}
|
bond_order.c |
/*******************************************************************************
** Calculate Steinhardt order parameters
** W. Lechner and C. Dellago. J. Chem.Phys. 129, 114707 (2008)
*******************************************************************************/
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include <Python.h> // includes stdio.h, string.h, errno.h, stdlib.h
#include <numpy/arrayobject.h>
#include <math.h>
#include "visclibs/boxeslib.h"
#include "visclibs/neb_list.h"
#include "visclibs/utilities.h"
#include "visclibs/array_utils.h"
#include "visclibs/constants.h"
#include "gui/preferences.h"
#if PY_MAJOR_VERSION >= 3
#define MOD_ERROR_VAL NULL
#define MOD_SUCCESS_VAL(val) val
#define MOD_INIT(name) PyMODINIT_FUNC PyInit_##name(void)
#define MOD_DEF(ob, name, doc, methods) \
static struct PyModuleDef moduledef = { \
PyModuleDef_HEAD_INIT, name, doc, -1, methods, }; \
ob = PyModule_Create(&moduledef);
#else
#define MOD_ERROR_VAL
#define MOD_SUCCESS_VAL(val)
#define MOD_INIT(name) void init##name(void)
#define MOD_DEF(ob, name, doc, methods) \
ob = Py_InitModule3(name, methods, doc);
#endif
/* structure for storing the result for an atom */
struct AtomStructureResults
{
    double Q6;          /* Steinhardt Q6 order parameter */
    double Q4;          /* Steinhardt Q4 order parameter */
    double realQ4[9];   /* Re(q_4m), indexed m+4 for m = -4..4 */
    double imgQ4[9];    /* Im(q_4m), indexed m+4 for m = -4..4 */
    double realQ6[13];  /* Re(q_6m), indexed m+6 for m = -6..6 */
    double imgQ6[13];   /* Im(q_6m), indexed m+6 for m = -6..6 */
};
static PyObject* bondOrderFilter(PyObject*, PyObject*);
static void Ylm(int, int, double, double, double*, double*);
static void convertToSphericalCoordinates(double, double, double, double, double*, double*);
static void complex_qlm(int, int*, struct NeighbourList*, double*, double*, int*, struct AtomStructureResults*);
static void calculate_Q(int, struct AtomStructureResults*);
const double factorials[] = {1.0, 1.0, 2.0, 6.0, 24.0, 120.0, 720.0, 5040.0, 40320.0, 362880.0, 3628800.0, 39916800.0, 479001600.0};
/*******************************************************************************
** List of python methods available in this module
*******************************************************************************/
static struct PyMethodDef module_methods[] = {
{"bondOrderFilter", bondOrderFilter, METH_VARARGS, "Run the bond order filter (calculates Steinhardt order parameters)"},
{NULL, NULL, 0, NULL}
};
/*******************************************************************************
** Module initialisation function
*******************************************************************************/
/* Module entry point; MOD_INIT/MOD_DEF hide the Python 2 vs 3 differences. */
MOD_INIT(_bond_order)
{
    PyObject *mod;

    MOD_DEF(mod, "_bond_order", "Calculate Steinhardt order parameters", module_methods)
    if (mod == NULL)
        return MOD_ERROR_VAL;

    /* must run before any numpy C-API use in this module */
    import_array();

    return MOD_SUCCESS_VAL(mod);
}
/*******************************************************************************
** Compute P_lm (numerical recipes Ch.8 p.254)
*******************************************************************************/
double Plm(int l, int m, double x)
{
double pmm;
if (m < 0 || m > l || fabs(x) > 1.0)
{
fprintf(stderr, "Bad arguments to in routine Plm\n");
exit(1);
}
pmm = 1.0;
if (m > 0)
{
int i;
double somx2, fact;
somx2 = sqrt((1.0 - x) * (1.0 + x));
fact = 1.0;
for (i = 1; i <= m; i++)
{
pmm *= - fact * somx2;
fact += 2.0;
}
}
if (l == m)
return pmm;
else
{
double pmmp1;
pmmp1 = x * (2.0 * m + 1.0) * pmm;
if (l == m + 1)
return pmmp1;
else
{
int ll;
double pll = 0.0;
for (ll = m + 2; ll <= l; ll++)
{
pll = (x * (2.0 * ll - 1.0) * pmmp1 - (ll + m - 1) * pmm) / (ll - m);
pmm = pmmp1;
pmmp1 = pll;
}
return pll;
}
}
}
/*******************************************************************************
** Compute Y_lm (spherical harmonics)
*******************************************************************************/
static void Ylm(int l, int m, double theta, double phi, double *realYlm, double *imgYlm)
{
double factor, arg, val;
val = Plm(l, m, cos(theta));
factor = ((2.0 * (double)l + 1.0) * factorials[l - m]) / (4.0 * CONST_PI * factorials[l + m]);
factor = sqrt(factor);
arg = (double) m * phi;
*realYlm = factor * val * cos(arg);
*imgYlm = factor * val * sin(arg);
}
/*******************************************************************************
** Convert to spherical coordinates
*******************************************************************************/
static void convertToSphericalCoordinates(double xdiff, double ydiff, double zdiff, double sep, double *phi, double *theta)
{
*theta = acos(zdiff / sep);
*phi = atan2(ydiff, xdiff);
}
/*******************************************************************************
** Compute complex q_lm (sum over eq. 3 from Stukowski paper), for each atom
*******************************************************************************/
/*******************************************************************************
 ** Accumulate the complex q_lm values for one atom and a single l.
 ** realOut/imgOut must hold 2*l+1 entries, indexed by m+l for m = -l..l.
 ** (Previously this ~40 line loop was duplicated inline for l = 6 and l = 4.)
 *******************************************************************************/
static void complex_qlm_one_l(int l, int visIndex, double xpos1, double ypos1, double zpos1,
                              int *visibleAtoms, struct NeighbourList *nebList, double *pos,
                              double *cellDims, int *PBC, double *realOut, double *imgOut)
{
    int m;

    for (m = -l; m <= l; m++)
    {
        int i;
        double real_part = 0.0;
        double img_part = 0.0;

        /* sum Y_lm over all neighbours of this atom */
        for (i = 0; i < nebList[visIndex].neighbourCount; i++)
        {
            int visIndex2, index2;
            double xpos2, ypos2, zpos2, sepVec[3];
            double theta, phi, realYlm, complexYlm;

            /* atom 2 position */
            visIndex2 = nebList[visIndex].neighbour[i];
            index2 = visibleAtoms[visIndex2];
            xpos2 = pos[3*index2];
            ypos2 = pos[3*index2+1];
            zpos2 = pos[3*index2+2];

            /* separation vector between the two atoms (PBC-aware) */
            atomSeparationVector(sepVec, xpos1, ypos1, zpos1, xpos2, ypos2, zpos2,
                                 cellDims[0], cellDims[1], cellDims[2], PBC[0], PBC[1], PBC[2]);

            /* convert to spherical coordinates */
            convertToSphericalCoordinates(sepVec[0], sepVec[1], sepVec[2],
                                          nebList[visIndex].neighbourSep[i], &phi, &theta);

            /* Y_l^{-m} is obtained from Y_l^{m} via the (-1)^m prefactor */
            if (m < 0)
            {
                Ylm(l, abs(m), theta, phi, &realYlm, &complexYlm);
                realYlm = pow(-1.0, m) * realYlm;
                complexYlm = pow(-1.0, m) * complexYlm;
            }
            else
            {
                Ylm(l, m, theta, phi, &realYlm, &complexYlm);
            }

            real_part += realYlm;
            img_part += complexYlm;
        }

        /* average over the neighbours.
         * NOTE(review): divides by zero (-> NaN) when an atom has no
         * neighbours, exactly as the original code did -- confirm that the
         * neighbour-list construction guarantees neighbourCount > 0. */
        realOut[m + l] = real_part / ((double) nebList[visIndex].neighbourCount);
        imgOut[m + l] = img_part / ((double) nebList[visIndex].neighbourCount);
    }
}

/*******************************************************************************
 ** Compute complex q_lm (sum over eq. 3 from Stukowski paper), for each atom,
 ** for l = 6 and l = 4; results are stored per visible atom in `results`.
 *******************************************************************************/
static void complex_qlm(int NVisibleIn, int *visibleAtoms, struct NeighbourList *nebList, double *pos, double *cellDims,
                        int *PBC, struct AtomStructureResults *results)
{
    int visIndex;

    /* loop over atoms; iterations are independent (each writes only its own
     * results[visIndex] slot), so the parallel for is race-free */
    #pragma omp parallel for num_threads(prefs_numThreads)
    for (visIndex = 0; visIndex < NVisibleIn; visIndex++)
    {
        int index;
        double xpos1, ypos1, zpos1;

        /* atom 1 position */
        index = visibleAtoms[visIndex];
        xpos1 = pos[3*index];
        ypos1 = pos[3*index+1];
        zpos1 = pos[3*index+2];

        /* same accumulation for both l values, via the shared helper */
        complex_qlm_one_l(6, visIndex, xpos1, ypos1, zpos1, visibleAtoms, nebList, pos,
                          cellDims, PBC, results[visIndex].realQ6, results[visIndex].imgQ6);
        complex_qlm_one_l(4, visIndex, xpos1, ypos1, zpos1, visibleAtoms, nebList, pos,
                          cellDims, PBC, results[visIndex].realQ4, results[visIndex].imgQ4);
    }
}
/*******************************************************************************
** calculate Q4/6 from complex q_lm's
*******************************************************************************/
/* Compute the rotationally invariant Q4 and Q6 order parameters from the
 * per-atom complex q_lm values already stored in `results`:
 *   Q_l = sqrt( (4 pi / (2l+1)) * sum_m |q_lm|^2 )        (Steinhardt) */
static void calculate_Q(int NVisibleIn, struct AtomStructureResults *results)
{
    int i, m;
    double sumQ6, sumQ4;
    const double pi13 = 4.0 * CONST_PI / 13.0;  /* 4 pi / (2*6 + 1) */
    const double pi9 = 4.0 * CONST_PI / 9.0;    /* 4 pi / (2*4 + 1) */

    /* loop over atoms and compute the Q4 and Q6 values */
    for (i = 0; i < NVisibleIn; i++)
    {
        sumQ6 = 0.0;
        for (m = 0; m < 13; m++)
        {
            sumQ6 += results[i].realQ6[m] * results[i].realQ6[m] + results[i].imgQ6[m] * results[i].imgQ6[m];
        }
        /* sqrt() instead of pow(x, 0.5): same value (the argument is a
         * non-negative sum of squares), clearer and cheaper */
        results[i].Q6 = sqrt(pi13 * sumQ6);

        sumQ4 = 0.0;
        for (m = 0; m < 9; m++)
        {
            sumQ4 += results[i].realQ4[m] * results[i].realQ4[m] + results[i].imgQ4[m] * results[i].imgQ4[m];
        }
        results[i].Q4 = sqrt(pi9 * sumQ4);
    }
}
/*******************************************************************************
** Calculate the bond order parameters and filter atoms (if required).
**
** Inputs:
** - visibleAtoms: the list of atoms that are currently visible
** - pos: positions of all the atoms
** - maxBondDistance: the maximum bond distance to consider
** - scalarsQ4: array to store the Q4 parameter value
** - scalarsQ6: array to store the Q6 parameter value
** - cellDims: simulation cell dimensions
** - PBC: periodic boundaries conditions
** - NScalars: the number of previously calculated scalar values
** - fullScalars: the full list of previously calculated scalars
** - NVectors: the number of previously calculated vector values
** - fullVectors: the full list of previously calculated vectors
** - filterQ4: filter atoms by the Q4 parameter
** - minQ4: the minimum Q4 for an atom to be visible
** - maxQ4: the maximum Q4 for an atom to be visible
** - filterQ6: filter atoms by the Q6 parameter
** - minQ6: the minimum Q6 for an atom to be visible
** - maxQ6: the maximum Q6 for an atom to be visible
*******************************************************************************/
static PyObject*
bondOrderFilter(PyObject *self, PyObject *args)
{
    int NVisibleIn, *visibleAtoms, *PBC, NScalars, filterQ4Enabled, filterQ6Enabled;
    int NVectors;
    double maxBondDistance, *scalarsQ4, *scalarsQ6, *cellDims;
    double *pos, *fullScalars, minQ4, maxQ4, minQ6, maxQ6;
    PyArrayObject *posIn=NULL;
    PyArrayObject *visibleAtomsIn=NULL;
    PyArrayObject *PBCIn=NULL;
    PyArrayObject *scalarsQ4In=NULL;
    PyArrayObject *scalarsQ6In=NULL;
    PyArrayObject *cellDimsIn=NULL;
    PyArrayObject *fullScalarsIn=NULL;
    PyArrayObject *fullVectors=NULL;

    int i, NVisible, boxstat;
    double *visiblePos, maxSep2;
    struct Boxes *boxes;
    struct NeighbourList *nebList;
    struct AtomStructureResults *results;

    /* parse and check arguments from Python */
    if (!PyArg_ParseTuple(args, "O!O!dO!O!O!O!iO!iddiddiO!", &PyArray_Type, &visibleAtomsIn, &PyArray_Type, &posIn, &maxBondDistance,
            &PyArray_Type, &scalarsQ4In, &PyArray_Type, &scalarsQ6In, &PyArray_Type, &cellDimsIn, &PyArray_Type, &PBCIn, &NScalars,
            &PyArray_Type, &fullScalarsIn, &filterQ4Enabled, &minQ4, &maxQ4, &filterQ6Enabled, &minQ6, &maxQ6, &NVectors,
            &PyArray_Type, &fullVectors))
        return NULL;

    if (not_intVector(visibleAtomsIn)) return NULL;
    visibleAtoms = pyvector_to_Cptr_int(visibleAtomsIn);
    NVisibleIn = (int) PyArray_DIM(visibleAtomsIn, 0);

    if (not_doubleVector(posIn)) return NULL;
    pos = pyvector_to_Cptr_double(posIn);

    if (not_doubleVector(scalarsQ4In)) return NULL;
    scalarsQ4 = pyvector_to_Cptr_double(scalarsQ4In);

    if (not_doubleVector(scalarsQ6In)) return NULL;
    scalarsQ6 = pyvector_to_Cptr_double(scalarsQ6In);

    if (not_doubleVector(cellDimsIn)) return NULL;
    cellDims = pyvector_to_Cptr_double(cellDimsIn);

    if (not_intVector(PBCIn)) return NULL;
    PBC = pyvector_to_Cptr_int(PBCIn);

    if (not_doubleVector(fullScalarsIn)) return NULL;
    fullScalars = pyvector_to_Cptr_double(fullScalarsIn);

    if (not_doubleVector(fullVectors)) return NULL;

    /* construct array of positions of visible atoms */
    visiblePos = malloc(3 * NVisibleIn * sizeof(double));
    if (visiblePos == NULL)
    {
        PyErr_SetString(PyExc_MemoryError, "Could not allocate visiblePos");
        return NULL;
    }
    for (i = 0; i < NVisibleIn; i++)
    {
        int index = visibleAtoms[i];
        int ind3 = 3 * index;
        int i3 = 3 * i;
        visiblePos[i3    ] = pos[ind3    ];
        visiblePos[i3 + 1] = pos[ind3 + 1];
        visiblePos[i3 + 2] = pos[ind3 + 2];
    }

    /* box visible atoms */
    boxes = setupBoxes(maxBondDistance, PBC, cellDims);
    if (boxes == NULL)
    {
        free(visiblePos);
        return NULL;
    }
    boxstat = putAtomsInBoxes(NVisibleIn, visiblePos, boxes);
    if (boxstat)
    {
        /* FIX: this error path previously leaked `boxes` (only visiblePos
         * was freed); presumably putAtomsInBoxes has set a Python error */
        freeBoxes(boxes);
        free(visiblePos);
        return NULL;
    }

    /* build neighbour list */
    maxSep2 = maxBondDistance * maxBondDistance;
    nebList = constructNeighbourList(NVisibleIn, visiblePos, boxes, cellDims, PBC, maxSep2);

    /* only required for building neb list */
    free(visiblePos);
    freeBoxes(boxes);

    /* return if failed to build the neighbour list */
    if (nebList == NULL) return NULL;

    /* allocate results structure */
    results = malloc(NVisibleIn * sizeof(struct AtomStructureResults));
    if (results == NULL)
    {
        PyErr_SetString(PyExc_MemoryError, "Could not allocate results");
        freeNeighbourList(nebList, NVisibleIn);
        return NULL;
    }

    /* first calc q_lm for each atom over all m values */
    complex_qlm(NVisibleIn, visibleAtoms, nebList, pos, cellDims, PBC, results);

    /* free neighbour list */
    freeNeighbourList(nebList, NVisibleIn);

    /* calculate Q4 and Q6 */
    calculate_Q(NVisibleIn, results);

    /* do filtering here, storing results along the way; surviving atoms are
     * compacted to the front of visibleAtoms/scalars/full arrays in place */
    NVisible = 0;
    for (i = 0; i < NVisibleIn; i++)
    {
        int j;
        double q4 = results[i].Q4;
        double q6 = results[i].Q6;

        /* skip if not within the valid range */
        if (filterQ4Enabled && (q4 < minQ4 || q4 > maxQ4))
            continue;
        if (filterQ6Enabled && (q6 < minQ6 || q6 > maxQ6))
            continue;

        /* store in visible atoms array */
        visibleAtoms[NVisible] = visibleAtoms[i];

        /* store calculated values */
        scalarsQ4[NVisible] = q4;
        scalarsQ6[NVisible] = q6;

        /* update full scalars/vectors arrays (same compaction per slice) */
        for (j = 0; j < NScalars; j++)
        {
            int nj = j * NVisibleIn;
            fullScalars[nj + NVisible] = fullScalars[nj + i];
        }
        for (j = 0; j < NVectors; j++)
        {
            int nj = j * NVisibleIn;
            DIND2(fullVectors, nj + NVisible, 0) = DIND2(fullVectors, nj + i, 0);
            DIND2(fullVectors, nj + NVisible, 1) = DIND2(fullVectors, nj + i, 1);
            DIND2(fullVectors, nj + NVisible, 2) = DIND2(fullVectors, nj + i, 2);
        }

        NVisible++;
    }

    /* free results memory */
    free(results);

    /* number of atoms that survived the filter */
    return Py_BuildValue("i", NVisible);
}
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include "omp.h"
#include "functions.h"
/*
 * Demonstrates an ElGamal cryptosystem: sets up keys, encrypts and
 * decrypts a test message, then brute-forces the secret key in
 * parallel with OpenMP and reports the search throughput.
 *
 * Relies on setupElGamal/ElGamalEncrypt/ElGamalDecrypt/modExp/padString/
 * convertStringToZ/convertZToString from functions.h.
 */
int main (int argc, char **argv) {
  (void) argc; (void) argv; // command-line arguments are unused

  int Nthreads = 1; // change this every time you run this
  omp_set_num_threads(Nthreads);

  // Seed value for the randomizer. clock() makes the program run
  // differently every time; use a constant (e.g. 0) for reproducible runs.
  double seed = clock();
  srand((unsigned int) seed);

  // Declare storage for an ElGamal cryptosystem (p, g, h public; x secret).
  unsigned int p, g, h, x;

  // Begin with getting the user's input.
  unsigned int n;
  printf("Enter a number of bits: "); fflush(stdout);
  // FIX: scanf returns an int (the number of conversions), not a char;
  // check it so non-numeric input does not leave n uninitialized.
  int status = scanf("%u", &n);
  if (status != 1) {
    printf("Unsupported bit size.\n");
    return 0;
  }

  // Make sure the input makes sense.
  // FIX: the original check accepted n == 8 even though its own comment said
  // "8 is no good (need to encode chars)" -- a full byte cannot be embedded
  // in Z_p when p has only 8 bits. Reject n <= 8.
  if ((n <= 8) || (n > 31)) {
    printf("Unsupported bit size.\n");
    return 0;
  }
  printf("\n");

  // Set up an ElGamal cryptosystem.
  setupElGamal(n, &p, &g, &h, &x);

  int bufferSize = 1024;
  unsigned char *message = (unsigned char *) malloc(bufferSize*sizeof(unsigned char));

  // Populate the string with a message (cast: strcpy expects char*).
  strcpy((char *) message, "Hello, this is the message as a string.");
  printf("Message = \"%s\"\n", message);

  // Number of whole characters that fit into one n-bit integer.
  unsigned int charsPerInt = n/8;

  padString(message, charsPerInt);
  printf("Padded Message = \"%s\"\n", message);

  unsigned int Nchars = strlen((char *) message);
  unsigned int Nints = Nchars/charsPerInt;

  // Storage for the message as elements of Z_p.
  unsigned int *Zmessage =
      (unsigned int *) malloc(Nints*sizeof(unsigned int));

  // Storage for the extra encryption coefficient.
  unsigned int *a =
      (unsigned int *) malloc(Nints*sizeof(unsigned int));

  // Cast the string into an unsigned int array.
  convertStringToZ(message, Nchars, Zmessage, Nints);

  // Encrypt the Zmessage with the ElGamal cryptographic system.
  ElGamalEncrypt(Zmessage, a, Nints, p, g, h);

  // FIX: opening bracket was missing (output ended with a bare "]").
  printf("The encrypted text is: [ ");
  for (unsigned int i = 0; i < Nints; i++) {
    printf("(%u,%u) ", Zmessage[i], a[i]);
  }
  printf("]\n");

  // Decrypt the Zmessage with the ElGamal cryptographic system.
  ElGamalDecrypt(Zmessage, a, Nints, p, x);
  convertZToString(Zmessage, Nints, message, Nchars);
  printf("Decrypted Message = \"%s\"\n", message);
  printf("\n");

  // Suppose we don't know the secret key. Use OpenMP threads to try and
  // find it in parallel by brute force.
  printf("Using %d OpenMP threads to find the secret key...\n", Nthreads);

  double startTime = omp_get_wtime();

  // FIX: 'val' was read and written concurrently by multiple threads with
  // no synchronization (a data race); guard it with atomic reads/writes.
  // The flag only short-circuits remaining modExp work -- every thread
  // still runs its full chunk, so the reported timing semantics are kept.
  unsigned int val = 0;
  #pragma omp parallel for shared(val)
  for (unsigned int i = 0; i < p-1; i++) {
    unsigned int found;
    #pragma omp atomic read
    found = val;
    if (found == 0 && modExp(g, i+1, p) == h) {
      // FIX: the loop tests g^(i+1) == h, so the secret key is i+1, not i.
      printf("Secret key found! x = %u \n", i+1);
      #pragma omp atomic write
      val = 1;
    }
  }
  double endTime = omp_get_wtime();

  double totalTime = endTime - startTime;
  double work = (double) p;
  double throughput = work/totalTime;
  printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput);

  // FIX: release heap buffers (they were previously leaked).
  free(message);
  free(Zmessage);
  free(a);

  return 0;
}
|
GB_unop__identity_fc64_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_int16)
// op(A') function: GB (_unop_tran__identity_fc64_int16)
// C type: GxB_FC64_t
// A type: int16_t
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_fc64_int16)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cast each int16 entry of A to double complex (imaginary part zero)
    // and store it in C, in parallel over the anz entries.
    int64_t k ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Entries with a zero bitmap flag are absent and skipped.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (!Ab [k]) continue ;
            int16_t aij = Ax [k] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [k] = z ;
        }
    }
    else
    {
        // dense case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            int16_t aij = Ax [k] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [k] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_fc64_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The shared transpose template expands via the GB_CAST_OP / GB_GETA
    // macros defined above: each int16 entry of A is typecast to double
    // complex while A is transposed into C.
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_sgemm_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Int8 convolution computed as im2col + packed GEMM:
//   1. im2col: unroll input patches into a (inch*kh*kw) x (outw*outh) matrix,
//   2. repack the data columns (8-wide tiles) and the kernel rows (4-wide
//      tiles) into interleaved layouts for cache-friendly access,
//   3. int32-accumulating GEMM writing `outch` output channels.
// top_blob must be pre-allocated; its channels are written as int32 sums
// (top_blob.channel(i) is accessed through an int*).
static void conv_im2col_sgemm_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, \
        const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char *kernel = _kernel;

    // im2col
    // Each input channel contributes kernel_h*kernel_w rows; each row holds
    // the outw*outh input samples that align with one kernel tap (u, v).
    Mat bottom_im2col(outw*outh, kernel_h*kernel_w*inch, 1UL, opt.workspace_allocator);
    {
        const int stride = kernel_h*kernel_w*outw*outh;
        signed char* ret = (signed char*)bottom_im2col;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=0; p<inch; p++)
        {
            const signed char* input = bottom_blob.channel(p);
            // each channel writes its own disjoint stride-sized region
            int retID = stride * p;
            for (int u=0; u<kernel_h; u++)
            {
                for (int v=0; v<kernel_w; v++)
                {
                    for (int i=0; i<outh; i++)
                    {
                        for (int j=0; j<outw; j++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;
                            ret[retID] = input[index];
                            retID++;
                        }
                    }
                }
            }
        }
    }

    int kernel_size = kernel_w * kernel_h;
    int out_size = outw * outh;

    // bottom_im2col memory packed 4 x 8
    // Tiles of 8 output positions are interleaved per channel-tap; the
    // remainder positions each get their own single-column channel
    // (hence out_size/8 + out_size%8 channels).
    Mat bottom_tm(8*kernel_size, inch, out_size/8 + out_size%8, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = out_size >> 3;
        int remain_size_start = nn_size << 3;

        // full tiles: copy 8 consecutive output positions for every
        // (channel, tap) row of the im2col matrix
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = ii * 8;

            const signed char* img0 = bottom_im2col.channel(0);
            img0 += i;

            signed char* tmpptr = bottom_tm.channel(i/8);

            for (int q=0; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img0[2];
                tmpptr[3] = img0[3];
                tmpptr[4] = img0[4];
                tmpptr[5] = img0[5];
                tmpptr[6] = img0[6];
                tmpptr[7] = img0[7];

                tmpptr += 8;
                img0 += out_size;
            }
        }

        // remainder: one output position per packed channel
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_size_start; i<out_size; i++)
        {
            const signed char* img0 = bottom_im2col.channel(0);
            img0 += i;

            signed char* tmpptr = bottom_tm.channel(i/8 + i%8);

            for (int q=0; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];

                tmpptr += 1;
                img0 += out_size;
            }
        }
    }

    // kernel memory packed 4 x 8
    // Groups of 4 output channels are interleaved weight-by-weight; leftover
    // output channels are stored one per packed channel.
    Mat kernel_tm(4*kernel_size, inch, outch/4 + outch%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int p = pp * 4;

            const signed char* k0 = kernel + (p+0)*inch*kernel_size;
            const signed char* k1 = kernel + (p+1)*inch*kernel_size;
            const signed char* k2 = kernel + (p+2)*inch*kernel_size;
            const signed char* k3 = kernel + (p+3)*inch*kernel_size;

            signed char* ktmp = kernel_tm.channel(p/4);

            for (int q=0; q<inch*kernel_size; q++)
            {
                // interleave the same weight index from 4 output channels
                ktmp[0] = k0[0];
                ktmp[1] = k1[0];
                ktmp[2] = k2[0];
                ktmp[3] = k3[0];
                ktmp += 4;

                k0 += 1;
                k1 += 1;
                k2 += 1;
                k3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=remain_outch_start; p<outch; p++)
        {
            const signed char* k0 = kernel + (p+0)*inch*kernel_size;

            signed char* ktmp = kernel_tm.channel(p/4 + p%4);

            for (int q=0; q<inch*kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp++;
                k0++;
            }
        }
    }

    // sgemm(int M, int N, int L, float* A, float* B, float* C)
    {
        // int M = outch;                    // outch
        int N = outw * outh;                 // outsize or out stride
        int L = kernel_w * kernel_h * inch;  // ksize * inch

        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        // main GEMM: 4 output channels x 8 output positions per tile
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int i = pp * 4;

            int* output0 = top_blob.channel(i);
            int* output1 = top_blob.channel(i+1);
            int* output2 = top_blob.channel(i+2);
            int* output3 = top_blob.channel(i+3);

            int j=0;
            for (; j+7<N; j=j+8)
            {
                signed char* vb = bottom_tm.channel(j/8);
                signed char* va = kernel_tm.channel(i/4);

                int sum0[8] = {0};
                int sum1[8] = {0};
                int sum2[8] = {0};
                int sum3[8] = {0};

                int k=0;
                // unrolled by 8 along L: for each output position n, va is
                // advanced through 8 interleaved k-steps (+4 each) and then
                // rewound (va -= 28), so each n re-reads the same weights;
                // the final va += 32 / vb += 64 advance to the next k-block.
                for (; k+7<L; k=k+8)
                {
                    for (int n=0; n<8; n++)
                    {
                        sum0[n] += (int)va[0] * vb[n];
                        sum1[n] += (int)va[1] * vb[n];
                        sum2[n] += (int)va[2] * vb[n];
                        sum3[n] += (int)va[3] * vb[n];
                        va += 4;

                        sum0[n] += (int)va[0] * vb[n+8];
                        sum1[n] += (int)va[1] * vb[n+8];
                        sum2[n] += (int)va[2] * vb[n+8];
                        sum3[n] += (int)va[3] * vb[n+8];
                        va += 4;

                        sum0[n] += (int)va[0] * vb[n+16];
                        sum1[n] += (int)va[1] * vb[n+16];
                        sum2[n] += (int)va[2] * vb[n+16];
                        sum3[n] += (int)va[3] * vb[n+16];
                        va += 4;

                        sum0[n] += (int)va[0] * vb[n+24];
                        sum1[n] += (int)va[1] * vb[n+24];
                        sum2[n] += (int)va[2] * vb[n+24];
                        sum3[n] += (int)va[3] * vb[n+24];
                        va += 4;

                        sum0[n] += (int)va[0] * vb[n+32];
                        sum1[n] += (int)va[1] * vb[n+32];
                        sum2[n] += (int)va[2] * vb[n+32];
                        sum3[n] += (int)va[3] * vb[n+32];
                        va += 4;

                        sum0[n] += (int)va[0] * vb[n+40];
                        sum1[n] += (int)va[1] * vb[n+40];
                        sum2[n] += (int)va[2] * vb[n+40];
                        sum3[n] += (int)va[3] * vb[n+40];
                        va += 4;

                        sum0[n] += (int)va[0] * vb[n+48];
                        sum1[n] += (int)va[1] * vb[n+48];
                        sum2[n] += (int)va[2] * vb[n+48];
                        sum3[n] += (int)va[3] * vb[n+48];
                        va += 4;

                        sum0[n] += (int)va[0] * vb[n+56];
                        sum1[n] += (int)va[1] * vb[n+56];
                        sum2[n] += (int)va[2] * vb[n+56];
                        sum3[n] += (int)va[3] * vb[n+56];
                        va -= 28;
                    }

                    va += 32;
                    vb += 64;
                }

                // tail of L (fewer than 8 remaining k-steps)
                for (; k<L; k++)
                {
                    for (int n=0; n<8; n++)
                    {
                        sum0[n] += (int)va[0] * vb[n];
                        sum1[n] += (int)va[1] * vb[n];
                        sum2[n] += (int)va[2] * vb[n];
                        sum3[n] += (int)va[3] * vb[n];
                    }

                    va += 4;
                    vb += 8;
                }

                for (int n=0; n<8; n++)
                {
                    output0[n] = sum0[n];
                    output1[n] = sum1[n];
                    output2[n] = sum2[n];
                    output3[n] = sum3[n];
                }
                output0 += 8;
                output1 += 8;
                output2 += 8;
                output3 += 8;
            }

            // remaining output positions (single column), 4 channels at once
            for (; j<N; j++)
            {
                int sum0 = 0;
                int sum1 = 0;
                int sum2 = 0;
                int sum3 = 0;

                signed char* vb = bottom_tm.channel(j/8 + j%8);
                signed char* va = kernel_tm.channel(i/4);

                for (int k=0; k<L; k++)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum1 += (int)va[1] * vb[0];
                    sum2 += (int)va[2] * vb[0];
                    sum3 += (int)va[3] * vb[0];

                    va += 4;
                    vb += 1;
                }

                output0[0] = sum0;
                output1[0] = sum1;
                output2[0] = sum2;
                output3[0] = sum3;

                output0++;
                output1++;
                output2++;
                output3++;
            }
        }

        // leftover output channels (one at a time)
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_outch_start; i<outch; i++)
        {
            int* output = top_blob.channel(i);

            int j=0;
            for (; j+7<N; j=j+8)
            {
                signed char* vb = bottom_tm.channel(j/8);
                signed char* va = kernel_tm.channel(i/4 + i%4);
                int sum[8] = {0};

                int k=0;
                // unrolled by 8 along L; weights here are not interleaved,
                // so va simply advances 8 per k-block
                for (; k+7<L; k=k+8)
                {
                    for (int n=0; n<8; n++)
                    {
                        sum[n] += (int)va[0] * vb[n];
                        sum[n] += (int)va[1] * vb[n+8];
                        sum[n] += (int)va[2] * vb[n+16];
                        sum[n] += (int)va[3] * vb[n+24];
                        sum[n] += (int)va[4] * vb[n+32];
                        sum[n] += (int)va[5] * vb[n+40];
                        sum[n] += (int)va[6] * vb[n+48];
                        sum[n] += (int)va[7] * vb[n+56];
                    }

                    va += 8;
                    vb += 64;
                }

                for (; k<L; k++)
                {
                    for (int n=0; n<8; n++)
                    {
                        sum[n] += (int)va[0] * vb[n];
                    }

                    va += 1;
                    vb += 8;
                }

                for (int n=0; n<8; n++)
                {
                    output[n] = sum[n];
                }
                output += 8;
            }

            for (; j<N; j++)
            {
                int sum = 0;

                signed char* vb = bottom_tm.channel(j/8 + j%8);
                signed char* va = kernel_tm.channel(i/4 + i%4);

                for (int k=0; k<L; k++)
                {
                    sum += (int)va[0] * vb[0];

                    va += 1;
                    vb += 1;
                }

                output[0] = sum;
                output++;
            }
        }
    }
}
|
wshfl.c | /* Copyright 2018-2019. Massachusetts Institute of Technology.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2018-2019 Siddharth Iyer <ssi@mit.edu>
*
* Tamir J, Uecker M, Chen W, Lai P, Alley MT, Vasanawala SS, Lustig M.
* T2 shuffling: Sharp, multicontrast, volumetric fast spin‐echo imaging.
* Magnetic resonance in medicine. 2017 Jan 1;77(1):180-95.
*
* B Bilgic, BA Gagoski, SF Cauley, AP Fan, JR Polimeni, PE Grant,
* LL Wald, and K Setsompop, Wave-CAIPI for highly accelerated 3D
* imaging. Magn Reson Med (2014) doi: 10.1002/mrm.25347
*
* Iyer S, Bilgic B, Setsompop K.
* Faster T2 shuffling with Wave.
* Presented in the session: "Signal Encoding and Decoding" at ISMRM 2018.
* https://www.ismrm.org/18/program_files/O67.htm
*/
#include <stdbool.h>
#include <complex.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "num/multind.h"
#include "num/flpmath.h"
#include "num/fft.h"
#include "num/init.h"
#include "num/iovec.h"
#include "num/ops.h"
#include "num/ops_p.h"
#ifdef USE_CUDA
#include "num/gpuops.h"
#endif
#include "iter/iter.h"
#include "iter/lsqr.h"
#include "iter/misc.h"
#include "linops/linop.h"
#include "linops/fmac.h"
#include "linops/someops.h"
#include "linops/decompose_complex.h"
#include "misc/debug.h"
#include "misc/mri.h"
#include "misc/utils.h"
#include "misc/mmio.h"
#include "misc/misc.h"
#include "misc/opts.h"
#include "wavelet/wavthresh.h"
#include "lowrank/lrthresh.h"
#include "grecon/optreg.h"
#include "grecon/italgo.h"
/* Command-line usage and help text for the wshfl (wave-shuffling) tool. */
static const char usage_str[] = "<maps> <wave> <phi> <reorder> <table> <output>";
static const char help_str[] =
	"Perform a wave-shuffling reconstruction.\n\n"
	"Conventions:\n"
	" * (sx, sy, sz) - Spatial dimensions.\n"
	" * wx - Extended FOV in READ_DIM due to\n"
	" wave's voxel spreading.\n"
	" * (nc, md) - Number of channels and ESPIRiT's \n"
	" extended-SENSE model operator\n"
	" dimensions (or # of maps).\n"
	" * (tf, tk) - Turbo-factor and the rank\n"
	" of the temporal basis used in\n"
	" shuffling.\n"
	" * ntr - Number of TRs, or the number of\n"
	" (ky, kz) points acquired of one\n"
	" echo image.\n"
	" * n - Total number of (ky, kz) points\n"
	" acquired. This is equal to the\n"
	" product of ntr and tf.\n\n"
	"Descriptions:\n"
	" * reorder is an (n by 3) index matrix such that\n"
	" [ky, kz, t] = reorder(i, :) represents the\n"
	" (ky, kz) kspace position of the readout line\n"
	" acquired at echo number (t), and 0 <= ky < sy,\n"
	" 0 <= kz < sz, 0 <= t < tf).\n"
	" * table is a (wx by nc by n) matrix such that\n"
	" table(:, :, k) represents the kth multichannel\n"
	" kspace line.\n\n"
	"Expected dimensions:\n"
	" * maps - ( sx, sy, sz, nc, md, 1, 1)\n"
	" * wave - ( wx, sy, sz, 1, 1, 1, 1)\n"
	" * phi - ( 1, 1, 1, 1, 1, tf, tk)\n"
	" * output - ( sx, sy, sz, 1, md, 1, tk)\n"
	" * reorder - ( n, 3, 1, 1, 1, 1, 1)\n"
	" * table - ( wx, nc, n, 1, 1, 1, 1)";
/* Debug-print the domain and codomain dimension vectors of a linop. */
static void print_opdims(const struct linop_s* op)
{
	const struct iovec_s* dom = linop_domain(op);
	const struct iovec_s* cod = linop_codomain(op);

	debug_printf(DP_INFO, "\tDomain: [");

	for (long d = 0; d < dom->N; d++)
		debug_printf(DP_INFO, "%6ld", dom->dims[d]);

	debug_printf(DP_INFO, "]\n");

	debug_printf(DP_INFO, "\tCodomain: [");

	for (long d = 0; d < cod->N; d++)
		debug_printf(DP_INFO, "%6ld", cod->dims[d]);

	debug_printf(DP_INFO, "]\n");
}
/* Build the (sy, sz, tf) sampling mask from the (n x 3) reorder table:
 * each acquired (ky, kz, echo) triple sets the corresponding mask entry. */
static void construct_mask(
	long reorder_dims[DIMS], complex float* reorder,
	long mask_dims[DIMS], complex float* mask)
{
	long n = reorder_dims[0];
	long sy = mask_dims[1];
	long sz = mask_dims[2];

	for (int idx = 0; idx < n; idx++) {

		long ky = lround(creal(reorder[idx]));
		long kz = lround(creal(reorder[idx + n]));
		long te = lround(creal(reorder[idx + 2 * n]));

		mask[(ky + kz * sy) + te * sy * sz] = 1;
	}
}
/* Operator data for the shuffling-encoding kernel linop (see
 * linop_kern_create). The dims arrays are owned by this struct; the
 * reorder/phi/kernel buffers are stored as borrowed pointers. */
struct kern_s {
	INTERFACE(linop_data_t);
	unsigned int N;
	long* reorder_dims; // Dimension of the index table: ( n, 3, 1, 1, 1, 1, 1, 1)
	long* phi_dims; // Dimension of the temporal basis: ( 1, 1, 1, 1, 1, tf, tk, 1)
	long* table_dims; // Dimension of the data table: (wx, nc, n, 1, 1, 1, 1, 1)
	long* kernel_dims; // Dimension of the kernel: ( 1, sy, sz, 1, 1, 1, tk, tk)
	complex float* reorder; // (ky, kz, echo) acquisition order, as complex values
	complex float* phi; // temporal basis (tf x tk)
	complex float* kernel; // precomputed normal-operator kernel
	complex float* gpu_kernel; // GPU copy of kernel, replicated over wx/nc (NULL if unused)
};
static DEF_TYPEID(kern_s);
/* Forward operator: go from coefficient-kspace (wx, sy, sz, nc, tk) to the
 * acquired data table (wx, nc, n) with memory efficiency. For each acquired
 * line i with position (y, z) and echo t in the reorder table, the tk
 * coefficients at (y, z) are expanded through the temporal basis phi and the
 * echo-t readout line is copied into the table. */
static void kern_apply(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	const struct kern_s* data = CAST_DOWN(kern_s, _data);

	// Problem sizes, pulled from the stored dimension arrays.
	long wx = data->table_dims[0];
	long sy = data->kernel_dims[1];
	long sz = data->kernel_dims[2];
	long nc = data->table_dims[1];
	long n = data->reorder_dims[0];
	long tf = data->phi_dims[5];
	long tk = data->phi_dims[6];

	long input_dims[] = { [0 ... DIMS - 1] = 1 };
	input_dims[0] = wx;
	input_dims[1] = sy;
	input_dims[2] = sz;
	input_dims[3] = nc;
	input_dims[6] = tk;

	// Permute src to (wx, nc, 1, tk, sy, sz) so that the (wx * nc * tk)
	// block belonging to a single (y, z) location is contiguous.
	long perm_dims[] = { [0 ... DIMS - 1] = 1 };
	perm_dims[0] = wx;
	perm_dims[1] = nc;
	perm_dims[3] = tk;
	perm_dims[4] = sy;
	perm_dims[5] = sz;
	complex float* perm = md_alloc_sameplace(DIMS, perm_dims, CFL_SIZE, src);
	unsigned int permute_order[DIMS] = {0, 3, 5, 6, 1, 2, 4, 7};
	for (unsigned int i = 8; i < DIMS; i++)
		permute_order[i] = i;
	md_permute(DIMS, permute_order, perm_dims, perm, input_dims, src, CFL_SIZE);

	// 4D working dimensions for the basis expansion at one (y, z).
	long vec_dims[] = {wx, nc, tf, 1};
	long phi_mat_dims[] = { 1, 1, tf, tk};
	long phi_in_dims[] = {wx, nc, 1, tk};
	long fmac_dims[] = {wx, nc, tf, tk};
	long line_dims[] = {wx, nc, 1, 1};
	complex float* vec = md_alloc_sameplace(4, vec_dims, CFL_SIZE, src);
	long vec_str[4];
	md_calc_strides(4, vec_str, vec_dims, CFL_SIZE);
	long phi_mat_str[4];
	md_calc_strides(4, phi_mat_str, phi_mat_dims, CFL_SIZE);
	long phi_in_str[4];
	md_calc_strides(4, phi_in_str, phi_in_dims, CFL_SIZE);
	long fmac_str[4];
	md_calc_strides(4, fmac_str, fmac_dims, CFL_SIZE);

	int y = -1;
	int z = -1;
	int t = -1;
	for (int i = 0; i < n; i ++) {
		// Decode the (ky, kz, echo) triple for table line i.
		y = lround(creal(data->reorder[i]));
		z = lround(creal(data->reorder[i + n]));
		t = lround(creal(data->reorder[i + 2 * n]));
		// vec(wx, nc, tf) = sum_tk coeffs(y, z) * phi(tf, tk)
		md_clear(4, vec_dims, vec, CFL_SIZE);
		md_zfmac2(4, fmac_dims, vec_str, vec, phi_in_str, (perm + ((wx * nc * tk) * (y + z * sy))), phi_mat_str, data->phi);
		// Copy the echo-t line into table slot i.
		md_copy(4, line_dims, dst + (i * wx * nc), vec + (t * wx * nc), CFL_SIZE);
	}

	md_free(perm);
	md_free(vec);
}
/* Adjoint operator: collapse the data table (wx, nc, n) back into the
 * temporal basis, producing coefficient-kspace (wx, sy, sz, nc, tk). Table
 * lines sharing the same (ky, kz) are gathered into a per-thread echo
 * vector and projected onto the basis with a conjugated fmac. */
static void kern_adjoint(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	struct kern_s* data = CAST_DOWN(kern_s, _data);

	long wx = data->table_dims[0];
	long sy = data->kernel_dims[1];
	long sz = data->kernel_dims[2];
	long nc = data->table_dims[1];
	long n = data->reorder_dims[0];
	long tf = data->phi_dims[5];
	long tk = data->phi_dims[6];

	// Accumulator in (wx, nc, 1, tk, sy, sz) layout; permuted to the
	// output layout at the end.
	long perm_dims[] = { [0 ... DIMS - 1] = 1 };
	perm_dims[0] = wx;
	perm_dims[1] = nc;
	perm_dims[3] = tk;
	perm_dims[4] = sy;
	perm_dims[5] = sz;
	complex float* perm = md_alloc_sameplace(DIMS, perm_dims, CFL_SIZE, dst);
	md_clear(DIMS, perm_dims, perm, CFL_SIZE);

#ifdef _OPENMP
	long num_threads = omp_get_max_threads();
#else
	long num_threads = 1;
#endif

	long vec_dims[] = {wx, nc, tf, 1};
	long phi_mat_dims[] = { 1, 1, tf, tk};
	long phi_out_dims[] = {wx, nc, 1, tk};
	long fmac_dims[] = {wx, nc, tf, tk};
	long line_dims[] = {wx, nc, 1, 1};
	// One scratch echo-vector per thread (last dim indexes the thread).
	long vthrd_dims[] = {wx, nc, tf, 1, num_threads};
	complex float* vec = md_alloc_sameplace(5, vthrd_dims, CFL_SIZE, dst);
	md_clear(5, vthrd_dims, vec, CFL_SIZE);
	long vec_str[4];
	md_calc_strides(4, vec_str, vec_dims, CFL_SIZE);
	long phi_mat_str[4];
	md_calc_strides(4, phi_mat_str, phi_mat_dims, CFL_SIZE);
	long phi_out_str[4];
	md_calc_strides(4, phi_out_str, phi_out_dims, CFL_SIZE);
	long fmac_str[4];
	md_calc_strides(4, fmac_str, fmac_dims, CFL_SIZE);

	// flags[i] marks table line i as already accumulated by some group.
	long flag_dims[1] = { n };
	complex float* flags = md_calloc(1, flag_dims, CFL_SIZE);
	// NOTE(review): flags[] is read (flags[k]) and written (flags[i] = 1)
	// by different iterations of this parallel loop with no
	// synchronization; two threads could claim the same (y, z) group
	// concurrently and double-accumulate into perm -- confirm intended.
	#pragma omp parallel for
	for (int k = 0; k < n; k ++) {
#ifdef _OPENMP
		int tid = omp_get_thread_num();
#else
		int tid = 0;
#endif
		int y = lround(creal(data->reorder[k]));
		int z = lround(creal(data->reorder[k + n]));
		int t = -1;
		if (0 == flags[k]) {
			// Gather every table line with this (y, z) into the
			// thread-local echo vector, placed at its echo index t.
			md_clear(4, vec_dims, vec + (wx * nc * tf * tid), CFL_SIZE);
			for (int i = k; i < n; i ++) {
				if ((y == lround(creal(data->reorder[i]))) && (z == lround(creal(data->reorder[i + n])))) {
					flags[i] = 1;
					t = lround(creal(data->reorder[i + 2 * n]));
					md_copy(4, line_dims, (vec + (wx * nc * tf * tid) + t * wx * nc), (src + i * wx * nc), CFL_SIZE);
				}
			}
			// Project onto the basis: coeffs(y, z) += vec * conj(phi).
			md_zfmacc2(4, fmac_dims, phi_out_str, perm + (y + z * sy) * (wx * nc * tk), vec_str, vec + (wx * nc * tf * tid), phi_mat_str, data->phi);
		}
	}

	// Permute accumulator to the output layout (wx, sy, sz, nc, 1, 1, tk).
	long out_dims[] = { [0 ... DIMS - 1] = 1 };
	out_dims[0] = wx;
	out_dims[1] = sy;
	out_dims[2] = sz;
	out_dims[3] = nc;
	out_dims[6] = tk;
	unsigned int permute_order[DIMS] = {0, 4, 5, 1, 6, 2, 3, 7};
	for (unsigned int i = 8; i < DIMS; i++)
		permute_order[i] = i;
	md_permute(DIMS, permute_order, out_dims, dst, perm_dims, perm, CFL_SIZE);

	md_free(vec);
	md_free(perm);
	md_free(flags);
}
/* Normal operator: apply the precomputed (sy, sz, tk, tk) kernel to the
 * coefficient images, contracting the input tk dimension (dim 6) into the
 * output tk dimension (dim 7). Avoids the explicit forward/adjoint pair. */
static void kern_normal(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	const struct kern_s* data = CAST_DOWN(kern_s, _data);

	long wx = data->table_dims[0];
	long sy = data->kernel_dims[1];
	long sz = data->kernel_dims[2];
	long nc = data->table_dims[1];
	long tk = data->phi_dims[6];

	long input_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
	input_dims[0] = wx;
	input_dims[1] = sy;
	input_dims[2] = sz;
	input_dims[3] = nc;
	input_dims[6] = tk;
	long input_str[DIMS];
	md_calc_strides(DIMS, input_str, input_dims, CFL_SIZE);

	// Output has the contracted coefficient rank moved to dim 7.
	long output_dims[DIMS];
	md_copy_dims(DIMS, output_dims, input_dims);
	output_dims[6] = 1;
	output_dims[7] = tk;
	long output_str[DIMS];
	md_calc_strides(DIMS, output_str, output_dims, CFL_SIZE);

	// The GPU kernel was replicated over wx and nc at creation time
	// (see linop_kern_create), hence its own dims/strides.
	long gpu_kernel_dims[DIMS] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, gpu_kernel_dims, data->kernel_dims);
	gpu_kernel_dims[0] = wx;
	gpu_kernel_dims[3] = nc;

	long kernel_str[DIMS];
	md_calc_strides(DIMS, kernel_str, data->kernel_dims, CFL_SIZE);
	long gpu_kernel_str[DIMS];
	md_calc_strides(DIMS, gpu_kernel_str, gpu_kernel_dims, CFL_SIZE);

	long fmac_dims[DIMS];
	md_merge_dims(DIMS, fmac_dims, input_dims, data->kernel_dims);

	md_clear(DIMS, output_dims, dst, CFL_SIZE);
#ifdef USE_CUDA
	if(cuda_ondevice(src))
		md_zfmac2(DIMS, fmac_dims, output_str, dst, input_str, src, gpu_kernel_str, data->gpu_kernel);
	else
#endif
	md_zfmac2(DIMS, fmac_dims, output_str, dst, input_str, src, kernel_str, data->kernel);
}
/* Destructor for kern_s: releases the dimension arrays (and the GPU kernel
 * copy, if one was made); the reorder/phi/kernel buffers are not freed here. */
static void kern_free(const linop_data_t* _data)
{
	const struct kern_s* d = CAST_DOWN(kern_s, _data);

	xfree(d->reorder_dims);
	xfree(d->phi_dims);
	xfree(d->kernel_dims);
	xfree(d->table_dims);

#ifdef USE_CUDA
	if (NULL != d->gpu_kernel)
		md_free(d->gpu_kernel);
#endif

	xfree(d);
}
/* Create the shuffling-encoding linop K: coefficient-kspace
 * (wx, sy, sz, nc, tk) -> data table (wx, nc, n). Copies the dimension
 * arrays into an owned kern_s; reorder/phi/kernel are borrowed pointers.
 * With gpu_flag set, the kernel is replicated over wx and nc and moved to
 * the GPU so kern_normal can run device-side. */
static const struct linop_s* linop_kern_create(bool gpu_flag,
	const long _reorder_dims[DIMS], complex float* reorder,
	const long _phi_dims[DIMS], complex float* phi,
	const long _kernel_dims[DIMS], complex float* kernel,
	const long _table_dims[DIMS])
{
	PTR_ALLOC(struct kern_s, data);
	SET_TYPEID(kern_s, data);

	// Deep-copy the dimension vectors; kern_free releases them.
	PTR_ALLOC(long[DIMS], reorder_dims);
	PTR_ALLOC(long[DIMS], phi_dims);
	PTR_ALLOC(long[DIMS], table_dims);
	PTR_ALLOC(long[DIMS], kernel_dims);
	md_copy_dims(DIMS, *reorder_dims, _reorder_dims);
	md_copy_dims(DIMS, *phi_dims, _phi_dims);
	md_copy_dims(DIMS, *table_dims, _table_dims);
	md_copy_dims(DIMS, *kernel_dims, _kernel_dims);
	data->reorder_dims = *PTR_PASS(reorder_dims);
	data->phi_dims = *PTR_PASS(phi_dims);
	data->table_dims = *PTR_PASS(table_dims);
	data->kernel_dims = *PTR_PASS(kernel_dims);

	data->reorder = reorder;
	data->phi = phi;
	data->kernel = kernel;
	data->gpu_kernel = NULL;
#ifdef USE_CUDA
	if(gpu_flag) {
		// Broadcast the (1, sy, sz, ..., tk, tk) kernel over the read
		// and coil dimensions, then move the expanded copy to the GPU.
		long repmat_kernel_dims[DIMS] = { [0 ... DIMS - 1] = 1};
		md_copy_dims(DIMS, repmat_kernel_dims, _kernel_dims);
		repmat_kernel_dims[0] = _table_dims[0];
		repmat_kernel_dims[3] = _table_dims[1];
		long kernel_strs[DIMS];
		long repmat_kernel_strs[DIMS];
		md_calc_strides(DIMS, kernel_strs, _kernel_dims, CFL_SIZE);
		md_calc_strides(DIMS, repmat_kernel_strs, repmat_kernel_dims, CFL_SIZE);
		complex float* repmat_kernel = md_calloc(DIMS, repmat_kernel_dims, CFL_SIZE);
		md_copy2(DIMS, repmat_kernel_dims, repmat_kernel_strs, repmat_kernel, kernel_strs, kernel, CFL_SIZE);
		data->gpu_kernel = md_gpu_move(DIMS, repmat_kernel_dims, repmat_kernel, CFL_SIZE);
		md_free(repmat_kernel);
	}
#else
	UNUSED(gpu_flag);
#endif

	// Domain: coefficient-kspace images.
	long input_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
	input_dims[0] = _table_dims[0];
	input_dims[1] = _kernel_dims[1];
	input_dims[2] = _kernel_dims[2];
	input_dims[3] = _table_dims[1];
	input_dims[6] = _phi_dims[6];

	// Codomain: the acquired data table (wx, nc, n).
	long output_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
	output_dims[0] = _table_dims[0];
	output_dims[1] = _table_dims[1];
	output_dims[2] = _reorder_dims[0];

	const struct linop_s* K = linop_create(DIMS, output_dims, DIMS, input_dims, CAST_UP(PTR_PASS(data)), kern_apply, kern_adjoint, kern_normal, NULL, kern_free);
	return K;
}
/* Operator data for the multi-channel wrapper: applies a single-channel
 * operator once per coil, weighted by the ESPIRiT maps. */
struct multc_s {
	INTERFACE(linop_data_t);
	unsigned int nc; // number of coils/channels
	unsigned int md; // number of ESPIRiT maps
	const complex float* maps; // sensitivity maps (sx, sy, sz, nc, md)
	const struct linop_s* sc_op; // Single channel operator.
};
static DEF_TYPEID(multc_s);
/* Forward: for each coil k, contract src over the maps dimension with the
 * coil-k sensitivity map, push the result through the single-channel
 * forward operator into a per-coil table, then permute the collected
 * (wx, n, nc) table into the (wx, nc, n) destination. */
static void multc_apply(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	const struct multc_s* data = CAST_DOWN(multc_s, _data);

	// Loading single channel operator.
	const struct operator_s* fwd = data->sc_op->forward;
	const long* sc_inp_dims = linop_domain(data->sc_op)->dims;
	const long* sc_out_dims = linop_codomain(data->sc_op)->dims;

	long sx = sc_inp_dims[0];
	long sy = sc_inp_dims[1];
	long sz = sc_inp_dims[2];
	long wx = sc_out_dims[0];
	long n = sc_out_dims[2];
	long nc = data->nc;
	long md = data->md;

	// src carries the maps dimension; the single-channel input does not.
	long src_dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, src_dims, sc_inp_dims);
	src_dims[MAPS_DIM] = md;

	long dst_dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, dst_dims, sc_out_dims);
	dst_dims[1] = nc;

	long map_dims[] = { [0 ... DIMS - 1] = 1};
	map_dims[0] = sx;
	map_dims[1] = sy;
	map_dims[2] = sz;
	map_dims[3] = nc;
	map_dims[4] = md;

	long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, single_map_dims, map_dims);
	single_map_dims[COIL_DIM] = 1;

	complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
	complex float* buffer = md_alloc_sameplace(DIMS, sc_inp_dims, CFL_SIZE, src);

	// Per-coil results land in tbl with layout (wx, n, nc).
	long tbl_dims[] = { [0 ... DIMS - 1] = 1};
	tbl_dims[0] = wx;
	tbl_dims[1] = n;
	tbl_dims[2] = nc;
	complex float* tbl = md_alloc_sameplace(DIMS, tbl_dims, CFL_SIZE, src);
	md_clear(DIMS, tbl_dims, tbl, CFL_SIZE);

	long pos[] = { [0 ... DIMS - 1] = 0 };

	long zfmac_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, zfmac_dims, src_dims);

	long strides_single_map[DIMS];
	md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);
	long strides_src[DIMS];
	md_calc_strides(DIMS, strides_src, src_dims, CFL_SIZE);
	long strides_sc_inp[DIMS];
	md_calc_strides(DIMS, strides_sc_inp, sc_inp_dims, CFL_SIZE);

	for (long k = 0; k < data->nc; k++) {
		md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
		md_clear(DIMS, sc_inp_dims, buffer, CFL_SIZE);

		// Extract coil-k maps and contract src over the maps dim.
		pos[COIL_DIM] = k;
		md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
		pos[COIL_DIM] = 0;
		md_zfmac2(DIMS, zfmac_dims, strides_sc_inp, buffer, strides_src, src, strides_single_map, single_map);

		// Single-channel forward writes coil-k's table slab.
		operator_apply(fwd, DIMS, sc_out_dims, tbl + (wx * n * k), DIMS, sc_inp_dims, buffer);
	}

	// Reorder (wx, n, nc) -> (wx, nc, n).
	md_clear(DIMS, dst_dims, dst, CFL_SIZE);
	unsigned int permute_order[DIMS] = {0, 2, 1};
	for (unsigned int i = 3; i < DIMS; i++)
		permute_order[i] = i;
	md_permute(DIMS, permute_order, dst_dims, dst, tbl_dims, tbl, CFL_SIZE);

	md_free(single_map);
	md_free(buffer);
	md_free(tbl);
}
/* Adjoint: for each coil k, slice coil-k's lines out of the table, apply
 * the single-channel adjoint, multiply by the conjugated coil-k map, and
 * accumulate over coils into the coefficient images. */
static void multc_adjoint(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	const struct multc_s* data = CAST_DOWN(multc_s, _data);

	// Loading single channel operator.
	const struct operator_s* adj = data->sc_op->adjoint;
	const long* sc_inp_dims = linop_codomain(data->sc_op)->dims;
	const long* sc_out_dims = linop_domain(data->sc_op)->dims;

	long sx = sc_out_dims[0];
	long sy = sc_out_dims[1];
	long sz = sc_out_dims[2];
	long wx = sc_inp_dims[0];
	long n = sc_inp_dims[2];
	long nc = data->nc;
	long md = data->md;

	long src_dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, src_dims, sc_inp_dims);
	src_dims[1] = nc;

	// dst carries the maps dimension.
	long dst_dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, dst_dims, sc_out_dims);
	dst_dims[MAPS_DIM] = md;

	long map_dims[] = { [0 ... DIMS - 1] = 1};
	map_dims[0] = sx;
	map_dims[1] = sy;
	map_dims[2] = sz;
	map_dims[3] = nc;
	map_dims[4] = md;

	long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, single_map_dims, map_dims);
	single_map_dims[COIL_DIM] = 1;

	complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
	complex float* buffer1 = md_alloc_sameplace(DIMS, sc_out_dims, CFL_SIZE, src);
	complex float* buffer2 = md_alloc_sameplace(DIMS, dst_dims, CFL_SIZE, src);

	// Single-coil slice of the data table: (wx, 1, n).
	long tbl_dims[] = { [0 ... DIMS - 1] = 1};
	tbl_dims[0] = wx;
	tbl_dims[2] = n;
	complex float* tbl = md_alloc_sameplace(DIMS, tbl_dims, CFL_SIZE, src);

	long pos[] = { [0 ... DIMS - 1] = 0 };

	long strides_single_map[DIMS];
	md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);
	long strides_sc_out[DIMS];
	md_calc_strides(DIMS, strides_sc_out, sc_out_dims, CFL_SIZE);
	long strides_dst[DIMS];
	md_calc_strides(DIMS, strides_dst, dst_dims, CFL_SIZE);

	md_clear(DIMS, dst_dims, dst, CFL_SIZE);
	for (long k = 0; k < data->nc; k++) {
		md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
		md_clear(DIMS, sc_out_dims, buffer1, CFL_SIZE);
		md_clear(DIMS, dst_dims, buffer2, CFL_SIZE);
		md_clear(DIMS, tbl_dims, tbl, CFL_SIZE);

		// Slice coil k out of the table (dim 1) and run the adjoint.
		pos[1] = k;
		md_slice(DIMS, 2, pos, src_dims, tbl, src, CFL_SIZE);
		pos[1] = 0;
		operator_apply(adj, DIMS, sc_out_dims, buffer1, DIMS, tbl_dims, tbl);

		// Multiply by conj(coil-k map) and accumulate into dst.
		pos[COIL_DIM] = k;
		md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
		pos[COIL_DIM] = 0;
		md_zfmacc2(DIMS, dst_dims, strides_dst, buffer2, strides_sc_out, buffer1, strides_single_map, single_map);
		md_zadd(DIMS, dst_dims, dst, dst, buffer2);
	}

	md_free(single_map);
	md_free(buffer1);
	md_free(buffer2);
	md_free(tbl);
}
/*
 * Normal operator (A^H A) of the multi-channel wrapper: applies the
 * single-channel normal operator once per coil, projecting onto each coil
 * sensitivity map before and off it (conjugated) after, and accumulates
 * the per-coil results into dst.
 */
static void multc_normal(const linop_data_t* _data, complex float* dst, const complex float* src)
{
const struct multc_s* data = CAST_DOWN(multc_s, _data);
// Loading single channel operator.
const struct operator_s* nrm = data->sc_op->normal;
const long* sc_dims = linop_domain(data->sc_op)->dims;
long sx = sc_dims[0];
long sy = sc_dims[1];
long sz = sc_dims[2];
long nc = data->nc;
long md = data->md;
// dims: single-channel domain widened to md ESPIRiT maps.
long dims[] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, dims, sc_dims);
dims[MAPS_DIM] = md;
// Full sensitivity-map dimensions: (sx, sy, sz, nc, md).
long map_dims[] = { [0 ... DIMS - 1] = 1};
map_dims[0] = sx;
map_dims[1] = sy;
map_dims[2] = sz;
map_dims[3] = nc;
map_dims[4] = md;
// Same as map_dims but for one coil at a time.
long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, single_map_dims, map_dims);
single_map_dims[COIL_DIM] = 1;
// Work buffers live on the same device (CPU/GPU) as src.
complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
complex float* buffer1 = md_alloc_sameplace(DIMS, sc_dims, CFL_SIZE, src);
complex float* buffer2 = md_alloc_sameplace(DIMS, sc_dims, CFL_SIZE, src);
complex float* buffer3 = md_alloc_sameplace(DIMS, dims, CFL_SIZE, src);
long pos[] = { [0 ... DIMS - 1] = 0 };
long strides_single_map[DIMS];
md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);
long strides_sc[DIMS];
md_calc_strides(DIMS, strides_sc, sc_dims, CFL_SIZE);
long strides[DIMS];
md_calc_strides(DIMS, strides, dims, CFL_SIZE);
md_clear(DIMS, dims, dst, CFL_SIZE);
// Loop over coils: dst += S_k^H (A^H A) (S_k src) for each coil k.
for (long k = 0; k < data->nc; k++) {
md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
md_clear(DIMS, sc_dims, buffer1, CFL_SIZE);
md_clear(DIMS, sc_dims, buffer2, CFL_SIZE);
md_clear(DIMS, dims, buffer3, CFL_SIZE);
// Extract coil-k sensitivity map.
pos[COIL_DIM] = k;
md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
pos[COIL_DIM] = 0;
// buffer1 = S_k * src (sums over the maps dimension).
md_zfmac2(DIMS, dims, strides_sc, buffer1, strides, src, strides_single_map, single_map);
// buffer2 = (A^H A) buffer1 via the single-channel normal operator.
operator_apply(nrm, DIMS, sc_dims, buffer2, DIMS, sc_dims, buffer1);
// buffer3 = conj(S_k) * buffer2, broadcast back over maps.
md_zfmacc2(DIMS, dims, strides, buffer3, strides_sc, buffer2, strides_single_map, single_map);
md_zadd(DIMS, dims, dst, dst, buffer3);
}
md_free(single_map);
md_free(buffer1);
md_free(buffer2);
md_free(buffer3);
}
/*
 * Release the wrapper's descriptor struct. Only the struct itself is freed;
 * the referenced maps and single-channel operator were never copied here
 * and remain owned by the caller.
 */
static void multc_free(const linop_data_t* _data)
{
const struct multc_s* data = CAST_DOWN(multc_s, _data);
xfree(data);
}
/*
 * Wrap a single-channel linear operator into a multi-channel SENSE-style
 * operator.
 *
 * nc    - number of coils.
 * md    - number of ESPIRiT maps.
 * maps  - coil sensitivity maps (borrowed, not copied).
 * sc_op - single-channel forward operator (borrowed, not copied).
 *
 * Domain  = sc_op domain with MAPS_DIM widened to md;
 * codomain = sc_op codomain with dimension 1 widened to nc.
 */
static struct linop_s* linop_multc_create(long nc, long md, const complex float* maps, const struct linop_s* sc_op)
{
PTR_ALLOC(struct multc_s, data);
SET_TYPEID(multc_s, data);
data->nc = nc;
data->md = md;
data->maps = maps;
data->sc_op = sc_op;
long* op_inp_dims = (long*) linop_domain(sc_op)->dims;
long* op_out_dims = (long*) linop_codomain(sc_op)->dims;
long input_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, input_dims, op_inp_dims);
input_dims[MAPS_DIM] = md;
long output_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, output_dims, op_out_dims);
output_dims[1] = nc;
struct linop_s* E = linop_create(DIMS, output_dims, DIMS, input_dims, CAST_UP(PTR_PASS(data)), multc_apply, multc_adjoint, multc_normal, NULL, multc_free);
return E;
}
/* Resize operator. */
/*
 * Resize operator: zero-pads the readout axis from sx to the wave-encoded
 * length wx. Domain is (sx, sy, sz, nc, 1, 1, tk); codomain differs only
 * in dimension 0.
 */
static const struct linop_s* linop_wavereshape_create(long wx, long sx, long sy, long sz, long nc, long tk)
{
long in_dims[] = { [0 ... DIMS - 1] = 1 };
in_dims[0] = sx;
in_dims[1] = sy;
in_dims[2] = sz;
in_dims[3] = nc;
in_dims[6] = tk;
long out_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, out_dims, in_dims);
out_dims[0] = wx;
return linop_resize_create(DIMS, out_dims, in_dims);
}
/* Fx operator. */
static const struct linop_s* linop_fx_create(long wx, long sy, long sz, long nc, long tk, bool centered)
{
long dims[] = { [0 ... DIMS - 1] = 1};
dims[0] = wx;
dims[1] = sy;
dims[2] = sz;
dims[3] = nc;
dims[6] = tk;
struct linop_s* Fx = NULL;
if (centered)
Fx = linop_fftc_create(DIMS, dims, READ_FLAG);
else
Fx = linop_fft_create(DIMS, dims, READ_FLAG);
return Fx;
}
/* Wave operator. */
static const struct linop_s* linop_wave_create(long wx, long sy, long sz, long nc, long tk, long psf_tk, complex float* psf)
{
long dims[] = { [0 ... DIMS - 1] = 1};
dims[0] = wx;
dims[1] = sy;
dims[2] = sz;
dims[3] = nc;
dims[6] = tk;
return (psf_tk > 1) ? linop_cdiag_create(DIMS, dims, FFT_FLAGS | COEFF_FLAG, psf) : linop_cdiag_create(DIMS, dims, FFT_FLAGS, psf);
}
/* Fyz operator. */
static const struct linop_s* linop_fyz_create(long wx, long sy, long sz, long nc, long tk, bool centered)
{
long dims[] = { [0 ... DIMS - 1] = 1};
dims[0] = wx;
dims[1] = sy;
dims[2] = sz;
dims[3] = nc;
dims[6] = tk;
struct linop_s* Fyz = NULL;
if (centered)
Fyz = linop_fftc_create(DIMS, dims, PHS1_FLAG|PHS2_FLAG);
else
Fyz = linop_fft_create(DIMS, dims, PHS1_FLAG|PHS2_FLAG);
return Fyz;
}
/* Construction sampling temporal kernel.*/
static void construct_kernel(
long mask_dims[DIMS], complex float* mask,
long phi_dims[DIMS], complex float* phi,
long kern_dims[DIMS], complex float* kern)
{
long sy = mask_dims[1];
long sz = mask_dims[2];
long tf = phi_dims[5];
long tk = phi_dims[6];
long cvec_dims[] = { [0 ... DIMS - 1] = 1 };
cvec_dims[6] = tk;
long cvec_str[DIMS];
md_calc_strides(DIMS, cvec_str, cvec_dims, CFL_SIZE);
complex float cvec[tk];
long tvec_dims[] = { [0 ... DIMS - 1] = 1 };
tvec_dims[5] = tf;
long tvec_str[DIMS];
md_calc_strides(DIMS, tvec_str, tvec_dims, CFL_SIZE);
complex float mvec[tf];
complex float tvec1[tf];
complex float tvec2[tf];
long phi_str[DIMS];
md_calc_strides(DIMS, phi_str, phi_dims, CFL_SIZE);
long out_dims[] = { [0 ... DIMS - 1] = 1 };
out_dims[0] = tk;
out_dims[1] = sy;
out_dims[2] = sz;
out_dims[3] = tk;
complex float* out = md_calloc(DIMS, out_dims, CFL_SIZE);
for (int y = 0; y < sy; y ++) {
for (int z = 0; z < sz; z ++) {
for (int t = 0; t < tf; t ++)
mvec[t] = mask[(y + sy * z) + (sy * sz) * t];
for (int t = 0; t < tk; t ++) {
cvec[t] = 1;
md_clear(DIMS, tvec_dims, tvec1, CFL_SIZE);
md_zfmac2(DIMS, phi_dims, tvec_str, tvec1, cvec_str, cvec, phi_str, phi);
md_clear(DIMS, tvec_dims, tvec2, CFL_SIZE);
md_zfmac2(DIMS, tvec_dims, tvec_str, tvec2, tvec_str, tvec1, tvec_str, mvec);
md_clear(DIMS, cvec_dims, out + y * tk + z * sy * tk + t * sy * sz * tk, CFL_SIZE);
md_zfmacc2(DIMS, phi_dims, cvec_str, out + y * tk + z * sy * tk + t * sy * sz * tk,
tvec_str, tvec2, phi_str, phi);
cvec[t] = 0;
}
}
}
unsigned int permute_order[DIMS] = {4, 1, 2, 5, 6, 7, 3, 0};
for (unsigned int i = 8; i < DIMS; i++)
permute_order[i] = i;
md_permute(DIMS, permute_order, kern_dims, kern, out_dims, out, CFL_SIZE);
md_free(out);
}
/*
 * Apply FFT-centering phase modulation to the acquired data table and the
 * sensitivity maps.
 *
 * The readout axis of the table and the spatial axes of the maps get a
 * standard fftmod. Because the table stores k-space lines in acquisition
 * (reordered) order rather than on a Cartesian grid, the y/z modulation is
 * applied manually: each line k receives the phase ramp corresponding to
 * its (y, z) position as recorded in the reorder table.
 *
 * Fix: the dims initializer previously used the range designator
 * [0 ... DIMS], creating an array of DIMS + 1 elements - one more than
 * every other dims array in this file. Only DIMS entries are ever read,
 * so behavior is unchanged, but the off-by-one is corrected for
 * consistency.
 */
static void fftmod_apply(long sy, long sz,
long reorder_dims[DIMS], complex float* reorder,
long table_dims[DIMS], complex float* table,
long maps_dims[DIMS], complex float* maps)
{
long wx = table_dims[0];
long nc = table_dims[1];
fftmod(DIMS, table_dims, READ_FLAG, table, table);
fftmod(DIMS, maps_dims, FFT_FLAGS, maps, maps);
// Fractional shifts equivalent to centering the y and z FFTs.
double dy = ((double) sy/2)/((double) sy);
double dz = ((double) sz/2)/((double) sz);
long dims[] = { [0 ... DIMS - 1] = 1};
dims[0] = wx;
dims[1] = nc;
long n = reorder_dims[0];
for (long k = 0; k < n; k++) {
// (y, z) grid position of line k; reorder stores y in column 0
// and z in column 1, n entries each.
long y = lround(creal(reorder[k]));
long z = lround(creal(reorder[k + n]));
complex float py = cexp(2.i * M_PI * dy * y);
complex float pz = cexp(2.i * M_PI * dz * z);
// Scale the whole (wx x nc) line by the combined phase.
md_zsmul(DIMS, dims, table + k * wx * nc, table + k * wx * nc, py * pz);
}
}
/*
 * Wave-shuffling reconstruction driver.
 *
 * Positional arguments (6): maps, wave PSF, temporal basis phi, reorder
 * table, data table, output. Builds the forward model
 * K . Fyz . W . Fx . R wrapped per-coil with the sensitivity maps, then
 * solves with FISTA (single regularizer) or ADMM (multiple regularizers).
 * The -K and -F flags short-circuit into adjoint-only / forward-only modes.
 */
int main_wshfl(int argc, char* argv[argc])
{
double start_time = timestamp();
struct opt_reg_s ropts;
opt_reg_init(&ropts);
// Defaults for iteration counts, step scaling and tolerances.
int maxiter = 30;
int cgiter = 10;
int blksize = 8;
float rho = 1;
float eval = -1;
float tol = 1E-3;
bool hgwld = false;
bool ksp = false;
const char* fwd = NULL;
const char* x0 = NULL;
bool use_gpu = false;
bool dcx = false;
const struct opt_s opts[] = {
{ 'R', NULL, true, opt_reg, &ropts, "<T>:A:B:C\tGeneralized regularization options. (-Rh for help)" },
OPT_INT( 'b', &blksize, "blkdim", "Block size for locally low rank."),
OPT_INT( 'i', &maxiter, "mxiter", "Maximum number of iterations."),
OPT_INT( 'j', &cgiter, "cgiter", "Maximum number of CG iterations in ADMM."),
OPT_FLOAT( 's', &rho, "admrho", "ADMM Rho value."),
OPT_FLOAT( 'e', &eval, "eigval", "Eigenvalue to scale step size. (Optional.)"),
OPT_STRING( 'F', &fwd, "frwrd", "Go from shfl-coeffs to data-table. Pass in coeffs path."),
OPT_STRING( 'O', &x0, "initl", "Initialize reconstruction with guess."),
OPT_FLOAT( 't', &tol, "toler", "Tolerance convergence condition for FISTA."),
OPT_SET( 'g', &use_gpu, "Use GPU."),
OPT_SET( 'K', &ksp, "Go from data-table to shuffling basis k-space."),
OPT_SET( 'H', &hgwld, "Use hogwild."),
OPT_SET( 'v', &dcx, "Split coefficients to real and imaginary components."),
};
cmdline(&argc, argv, 6, 6, usage_str, help_str, ARRAY_SIZE(opts), opts);
struct admm_conf admm = { false, false, false, rho, cgiter };
// Load all five inputs as memory-mapped CFL files.
debug_printf(DP_INFO, "Loading data... ");
long maps_dims[DIMS];
complex float* maps = load_cfl(argv[1], DIMS, maps_dims);
long wave_dims[DIMS];
complex float* wave = load_cfl(argv[2], DIMS, wave_dims);
long phi_dims[DIMS];
complex float* phi = load_cfl(argv[3], DIMS, phi_dims);
long reorder_dims[DIMS];
complex float* reorder = load_cfl(argv[4], DIMS, reorder_dims);
long table_dims[DIMS];
complex float* table = load_cfl(argv[5], DIMS, table_dims);
debug_printf(DP_INFO, "Done.\n");
(use_gpu ? num_init_gpu : num_init)();
// Problem sizes: wx = wave-encoded readout, (sx, sy, sz) image grid,
// nc coils, md maps, tf time frames, tk basis coefficients.
int wx = wave_dims[0];
int sx = maps_dims[0];
int sy = maps_dims[1];
int sz = maps_dims[2];
int nc = maps_dims[3];
int md = maps_dims[4];
int tf = phi_dims[5];
int tk = phi_dims[6];
debug_printf(DP_INFO, "Constructing sampling mask from reorder table... ");
long mask_dims[] = { [0 ... DIMS - 1] = 1 };
mask_dims[1] = sy;
mask_dims[2] = sz;
mask_dims[5] = tf;
complex float* mask = md_calloc(DIMS, mask_dims, CFL_SIZE);
construct_mask(reorder_dims, reorder, mask_dims, mask);
debug_printf(DP_INFO, "Done.\n");
// Kernel = Phi^H diag(mask) Phi per (y, z); see construct_kernel.
debug_printf(DP_INFO, "Constructing sampling-temporal kernel... ");
long kernel_dims[] = { [0 ... DIMS - 1] = 1 };
kernel_dims[1] = sy;
kernel_dims[2] = sz;
kernel_dims[6] = tk;
kernel_dims[7] = tk;
complex float* kernel = md_calloc(DIMS, kernel_dims, CFL_SIZE);
construct_kernel(mask_dims, mask, phi_dims, phi, kernel_dims, kernel);
md_free(mask);
debug_printf(DP_INFO, "Done.\n");
// Reconstruction coefficient dimensions; dim 8 doubles when splitting
// into real/imaginary components (-v).
long coeff_dims[] = { [0 ... DIMS - 1] = 1 };
coeff_dims[0] = sx;
coeff_dims[1] = sy;
coeff_dims[2] = sz;
coeff_dims[4] = md;
coeff_dims[6] = tk;
coeff_dims[8] = dcx ? 2 : 1;
// -K: only apply the kernel adjoint to the table and exit.
if (ksp == true) {
const struct linop_s* Knc = linop_kern_create(use_gpu, reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, table_dims);
long ksp_dims[] = { [0 ... DIMS - 1] = 1 };
ksp_dims[0] = wx;
ksp_dims[1] = sy;
ksp_dims[2] = sz;
ksp_dims[3] = nc;
ksp_dims[6] = tk;
complex float* res = create_cfl(argv[6], DIMS, ksp_dims);
operator_apply(Knc->adjoint, DIMS, ksp_dims, res, DIMS, table_dims, table);
linop_free(Knc);
md_free(kernel);
unmap_cfl(DIMS, maps_dims, maps);
unmap_cfl(DIMS, wave_dims, wave);
unmap_cfl(DIMS, phi_dims, phi);
unmap_cfl(DIMS, reorder_dims, reorder);
unmap_cfl(DIMS, table_dims, table);
unmap_cfl(DIMS, ksp_dims, res);
return 0;
}
// Build the single-channel chain R -> Fx -> W -> Fyz -> K (uncentered
// FFTs; centering phases were folded into data/maps by fftmod_apply).
debug_printf(DP_INFO, "Creating single channel linear operators:\n");
double t1;
double t2;
t1 = timestamp();
const struct linop_s* R = linop_wavereshape_create(wx, sx, sy, sz, 1, tk);
t2 = timestamp();
debug_printf(DP_INFO, "\tR: %f seconds.\n", t2 - t1);
t1 = timestamp();
const struct linop_s* Fx = linop_fx_create(wx, sy, sz, 1, tk, false);
t2 = timestamp();
debug_printf(DP_INFO, "\tFx: %f seconds.\n", t2 - t1);
t1 = timestamp();
const struct linop_s* W = linop_wave_create(wx, sy, sz, 1, tk, wave_dims[COEFF_DIM], wave);
t2 = timestamp();
debug_printf(DP_INFO, "\tW: %f seconds.\n", t2 - t1);
t1 = timestamp();
const struct linop_s* Fyz = linop_fyz_create(wx, sy, sz, 1, tk, false);
t2 = timestamp();
debug_printf(DP_INFO, "\tFyz: %f seconds.\n", t2 - t1);
t1 = timestamp();
long single_channel_table_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, single_channel_table_dims, table_dims);
single_channel_table_dims[1] = 1;
const struct linop_s* K = linop_kern_create(use_gpu, reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, single_channel_table_dims);
t2 = timestamp();
debug_printf(DP_INFO, "\tK: %f seconds.\n", t2 - t1);
struct linop_s* A_sc = linop_chain_FF(linop_chain_FF(linop_chain_FF(linop_chain_FF(
R, Fx), W), Fyz), K);
debug_printf(DP_INFO, "Single channel forward operator information:\n");
print_opdims(A_sc);
// Wrap with sensitivity maps into the full multi-coil operator.
struct linop_s* A = linop_multc_create(nc, md, maps, A_sc);
debug_printf(DP_INFO, "Overall forward linear operator information:\n");
print_opdims(A);
// -F: forward-simulate a data table from given coefficients and exit.
// Uses centered FFTs here since no fftmod was applied in this path.
if (fwd != NULL) {
debug_printf(DP_INFO, "Going from coefficients to data table... ");
complex float* coeffs_to_fwd = load_cfl(fwd, DIMS, coeff_dims);
complex float* table_forward = create_cfl(argv[6], DIMS, table_dims);
const struct linop_s* R = linop_wavereshape_create(wx, sx, sy, sz, 1, tk);
const struct linop_s* CFx = linop_fx_create( wx, sy, sz, 1, tk, true);
const struct linop_s* W = linop_wave_create(wx, sy, sz, 1, tk, wave_dims[COEFF_DIM], wave);
const struct linop_s* CFyz = linop_fyz_create(wx, sy, sz, 1, tk, true);
const struct linop_s* K = linop_kern_create(use_gpu, reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, single_channel_table_dims);
struct linop_s* AC_sc = linop_chain_FF(linop_chain_FF(linop_chain_FF(linop_chain_FF(
R, CFx), W), CFyz), K);
struct linop_s* AC = linop_multc_create(nc, md, maps, AC_sc);
operator_apply(AC->forward, DIMS, table_dims, table_forward, DIMS, coeff_dims, coeffs_to_fwd);
debug_printf(DP_INFO, "Done.\n");
debug_printf(DP_INFO, "Cleaning up... ");
linop_free(AC);
linop_free(AC_sc);
md_free(kernel);
unmap_cfl(DIMS, maps_dims, maps);
unmap_cfl(DIMS, wave_dims, wave);
unmap_cfl(DIMS, phi_dims, phi);
unmap_cfl(DIMS, reorder_dims, reorder);
unmap_cfl(DIMS, table_dims, table);
unmap_cfl(DIMS, table_dims, table_forward);
debug_printf(DP_INFO, "Done.\n");
return 0;
}
// -v: prepend an operator splitting complex coefficients into
// real/imaginary parts along ITER_DIM.
if (dcx) {
debug_printf(DP_INFO, "\tSplitting result into real and imaginary components.\n");
struct linop_s* tmp = A;
struct linop_s* dcxop = linop_decompose_complex_create(DIMS, ITER_DIM, linop_domain(A)->dims);
A = linop_chain(dcxop, tmp);
debug_printf(DP_INFO, "New operator information:\n");
print_opdims(A);
linop_free(dcxop);
linop_free(tmp);
}
// Normalize data; the scale is restored on the result after the solve.
debug_printf(DP_INFO, "Normalizing data table and applying fftmod to table and maps... ");
float norm = md_znorm(DIMS, table_dims, table);
md_zsmul(DIMS, table_dims, table, table, 1. / norm);
fftmod_apply(sy, sz, reorder_dims, reorder, table_dims, table, maps_dims, maps);
debug_printf(DP_INFO, "Done.\n");
debug_printf(DP_INFO, "Preparing reconstruction operator: ");
const struct operator_p_s* thresh_ops[NUM_REGS] = { NULL };
const struct linop_s* trafos[NUM_REGS] = { NULL };
opt_reg_configure(DIMS, coeff_dims, &ropts, thresh_ops, trafos, blksize, 1, use_gpu);
int nr_penalties = ropts.r;
struct reg_s* regs = ropts.regs;
// FISTA handles exactly one penalty; ADMM handles several.
bool fista = (nr_penalties == 1);
// FISTA variables.
float step = 0.5;
italgo_fun2_t italgo = iter2_call_iter;
struct iter_call_s iter2_data;
SET_TYPEID(iter_call_s, &iter2_data);
iter_conf* iconf = CAST_UP(&iter2_data);
struct iter_fista_conf fsconf = iter_fista_defaults;
// ADMM variables.
struct iter it;
if (fista) {
// Estimate the max eigenvalue of A^H A (single-channel) to scale
// the step size, unless the user supplied one via -e.
if (eval < 0) {
#ifdef USE_CUDA
eval = use_gpu ? estimate_maxeigenval_gpu(A_sc->normal) : estimate_maxeigenval(A_sc->normal);
#else
eval = estimate_maxeigenval(A_sc->normal);
#endif
}
step /= eval;
debug_printf(DP_INFO, "\tAlgorithm: FISTA.\n");
debug_printf(DP_INFO, "\tMax eigenvalue: %.2e\n", eval);
debug_printf(DP_INFO, "\tStep: %.2e\n", step);
debug_printf(DP_INFO, "\tTolerance: %.2e\n", tol);
fsconf.maxiter = maxiter;
fsconf.step = step;
fsconf.hogwild = hgwld;
fsconf.tol = tol;
iter2_data.fun = iter_fista;
iter2_data._conf = CAST_UP(&fsconf);
} else {
debug_printf(DP_INFO, "\tAlgorithm: ADMM\n.");
debug_printf(DP_INFO, "\tRho: %.2e\n.", rho);
it = italgo_config(ALGO_ADMM, nr_penalties, regs, maxiter, step, hgwld, false, admm, 1, false);
}
// Optional warm start (-O).
complex float* init = NULL;
if (x0 != NULL) {
debug_printf(DP_INFO, "Loading in initial guess... ");
init = load_cfl(x0, DIMS, coeff_dims);
debug_printf(DP_INFO, "Done.\n");
}
debug_printf(DP_INFO, "Reconstruction... ");
complex float* recon = create_cfl(argv[6], DIMS, coeff_dims);
struct lsqr_conf lsqr_conf = lsqr_defaults;
lsqr_conf.lambda = 0.;
lsqr_conf.it_gpu = use_gpu;
double recon_start = timestamp();
const struct operator_p_s* J = fista ?
lsqr2_create(&lsqr_conf, italgo, iconf, (const float*) init, A, NULL, nr_penalties, thresh_ops, NULL, NULL):
lsqr2_create(&lsqr_conf, it.italgo, it.iconf, (const float*) init, A, NULL, nr_penalties, thresh_ops, trafos, NULL);
operator_p_apply(J, 1., DIMS, coeff_dims, recon, DIMS, table_dims, table);
// Undo the earlier normalization.
md_zsmul(DIMS, coeff_dims, recon, recon, norm);
double recon_end = timestamp();
debug_printf(DP_INFO, "Done.\nReconstruction time: %f seconds.\n", recon_end - recon_start);
debug_printf(DP_INFO, "Cleaning up and saving result... ");
operator_p_free(J);
linop_free(A);
linop_free(A_sc);
md_free(kernel);
unmap_cfl(DIMS, maps_dims, maps);
unmap_cfl(DIMS, wave_dims, wave);
unmap_cfl(DIMS, phi_dims, phi);
unmap_cfl(DIMS, reorder_dims, reorder);
unmap_cfl(DIMS, table_dims, table);
unmap_cfl(DIMS, coeff_dims, recon);
if (x0 != NULL)
unmap_cfl(DIMS, coeff_dims, init);
debug_printf(DP_INFO, "Done.\n");
double end_time = timestamp();
debug_printf(DP_INFO, "Total time: %f seconds.\n", end_time - start_time);
return 0;
}
|
tree.h | #ifndef LIGHTGBM_TREE_H_
#define LIGHTGBM_TREE_H_
#include <LightGBM/meta.h>
#include <LightGBM/utils/common.h>
#include <string>
#include <vector>
#include <memory>
#include <cmath>
#include <map>
#include <unordered_map>
namespace LightGBM {
#define kCategoricalMask (1)
#define kDefaultLeftMask (2)
/*!
* \brief Tree model
*/
class Tree {
public:
/*!
* \brief Constructor
* \param max_leaves The number of max leaves
*/
explicit Tree(int max_leaves);
/*!
* \brief Constructor, from a string
* \param str Model string
* \param used_len used count of str
*/
Tree(const char* str, size_t* used_len);
~Tree();
/*! \brief Get the output of one leaf */
inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; }
/*! \brief Set the output of one leaf */
inline void SetLeafOutput(int leaf, double output) {
leaf_value_[leaf] = output;
}
/*!
* \brief Prediction on one record
* \param feature_values Feature value of this record
* \return Prediction result
*/
inline double Predict(const double* feature_values) const;
inline double PredictByMap(const std::unordered_map<int, double>& feature_values) const;
inline int PredictLeafIndex(const double* feature_values) const;
inline int PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const;
inline void PredictContrib(const double* feature_values, int num_features, double* output);
/*! \brief Get Number of leaves*/
inline int num_leaves() const { return num_leaves_; }
/*! \brief Get depth of specific leaf*/
inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; }
/*! \brief Get feature of specific split*/
inline int split_feature(int split_idx) const { return split_feature_[split_idx]; }
inline double split_gain(int split_idx) const { return split_gain_[split_idx]; }
/*! \brief Get the number of data points that fall at or below this node.
* Non-negative indices denote internal nodes; leaves are encoded as ~leaf_index. */
inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; }
/*!
* \brief Shrinkage for the tree's output
* shrinkage rate (a.k.a learning rate) is used to tune the training process
* \param rate The factor of shrinkage
*/
inline void Shrinkage(double rate) {
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
for (int i = 0; i < num_leaves_; ++i) {
leaf_value_[i] *= rate;
}
shrinkage_ *= rate;
}
/*! \brief Get the accumulated shrinkage factor applied to this tree */
inline double shrinkage() const {
return shrinkage_;
}
/*! \brief Add a constant to all leaf outputs; shrinkage is reset to 1 */
inline void AddBias(double val) {
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
for (int i = 0; i < num_leaves_; ++i) {
leaf_value_[i] = val + leaf_value_[i];
}
// force to 1.0
shrinkage_ = 1.0f;
}
/*! \brief Collapse the tree into a single leaf with constant output val */
inline void AsConstantTree(double val) {
num_leaves_ = 1;
shrinkage_ = 1.0f;
leaf_value_[0] = val;
}
/*! \brief Serialize this object to string*/
std::string ToString() const;
/*! \brief Serialize this object to json*/
std::string ToJSON() const;
/*! \brief Serialize this object to if-else statement*/
std::string ToIfElse(int index, bool predict_leaf_index) const;
/*! \brief True if fval lies within the zero threshold band */
inline static bool IsZero(double fval) {
if (fval > -kZeroThreshold && fval <= kZeroThreshold) {
return true;
} else {
return false;
}
}
/*! \brief Test a flag bit (kCategoricalMask / kDefaultLeftMask) of decision_type */
inline static bool GetDecisionType(int8_t decision_type, int8_t mask) {
return (decision_type & mask) > 0;
}
inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) {
if (input) {
(*decision_type) |= mask;
} else {
(*decision_type) &= (127 - mask);
}
}
/*! \brief The missing-value handling type is stored in bits 2-3 of decision_type */
inline static int8_t GetMissingType(int8_t decision_type) {
return (decision_type >> 2) & 3;
}
inline static void SetMissingType(int8_t* decision_type, int8_t input) {
(*decision_type) &= 3;
(*decision_type) |= (input << 2);
}
void RecomputeMaxDepth();
private:
std::string NumericalDecisionIfElse(int node) const;
std::string CategoricalDecisionIfElse(int node) const;
// Route a raw feature value through a numerical split; returns the child index.
inline int NumericalDecision(double fval, int node) const {
uint8_t missing_type = GetMissingType(decision_type_[node]);
if (std::isnan(fval)) {
// Unless NaN itself is the missing marker (type 2), treat NaN as zero.
if (missing_type != 2) {
fval = 0.0f;
}
}
if ((missing_type == 1 && IsZero(fval))
|| (missing_type == 2 && std::isnan(fval))) {
if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
return left_child_[node];
} else {
return right_child_[node];
}
}
if (fval <= threshold_[node]) {
return left_child_[node];
} else {
return right_child_[node];
}
}
// Same as NumericalDecision but for pre-binned feature values.
inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
uint8_t missing_type = GetMissingType(decision_type_[node]);
if ((missing_type == 1 && fval == default_bin)
|| (missing_type == 2 && fval == max_bin)) {
if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
return left_child_[node];
} else {
return right_child_[node];
}
}
if (fval <= threshold_in_bin_[node]) {
return left_child_[node];
} else {
return right_child_[node];
}
}
// Route a raw feature value through a categorical split (bitset membership test).
inline int CategoricalDecision(double fval, int node) const {
uint8_t missing_type = GetMissingType(decision_type_[node]);
int int_fval = static_cast<int>(fval);
if (int_fval < 0) {
return right_child_[node];;
} else if (std::isnan(fval)) {
// NaN is always in the right
if (missing_type == 2) {
return right_child_[node];
}
int_fval = 0;
}
// threshold_ holds the index into the category bitset for this node.
int cat_idx = int(threshold_[node]);
if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx],
cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) {
return left_child_[node];
}
return right_child_[node];
}
inline int CategoricalDecisionInner(uint32_t fval, int node) const {
int cat_idx = int(threshold_in_bin_[node]);
if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx],
cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) {
return left_child_[node];
}
return right_child_[node];
}
// Dispatch on the node's split kind (categorical vs. numerical).
inline int Decision(double fval, int node) const {
if (GetDecisionType(decision_type_[node], kCategoricalMask)) {
return CategoricalDecision(fval, node);
} else {
return NumericalDecision(fval, node);
}
}
inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
if (GetDecisionType(decision_type_[node], kCategoricalMask)) {
return CategoricalDecisionInner(fval, node);
} else {
return NumericalDecisionInner(fval, node, default_bin, max_bin);
}
}
inline void Split(int leaf, int feature, int real_feature,
double left_value, double right_value, int left_cnt, int right_cnt, float gain);
/*!
* \brief Find leaf index of which record belongs by features
* \param feature_values Feature value of this record
* \return Leaf index
*/
inline int GetLeaf(const double* feature_values) const;
inline int GetLeafByMap(const std::unordered_map<int, double>& feature_values) const;
/*! \brief Serialize one node to json*/
std::string NodeToJSON(int index) const;
/*! \brief Serialize one node to if-else statement*/
std::string NodeToIfElse(int index, bool predict_leaf_index) const;
std::string NodeToIfElseByMap(int index, bool predict_leaf_index) const;
double ExpectedValue() const;
/*! \brief This is used to fill in leaf_depth_ after reloading a model*/
inline void RecomputeLeafDepths(int node = 0, int depth = 0);
/*!
* \brief Used by TreeSHAP for data we keep about our decision path
*/
struct PathElement {
int feature_index;
double zero_fraction;
double one_fraction;
// note that pweight is included for convenience and is not tied with the other attributes,
// the pweight of the i'th path element is the permutation weight of paths with i-1 ones in them
double pweight;
PathElement() {}
PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {}
};
/*! \brief Polynomial time algorithm for SHAP values (https://arxiv.org/abs/1706.06060) */
void TreeSHAP(const double *feature_values, double *phi,
int node, int unique_depth,
PathElement *parent_unique_path, double parent_zero_fraction,
double parent_one_fraction, int parent_feature_index) const;
/*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/
static void ExtendPath(PathElement *unique_path, int unique_depth,
double zero_fraction, double one_fraction, int feature_index);
/*! \brief Undo a previous extension of the decision path for TreeSHAP*/
static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index);
/*! determine what the total permutation weight would be if we unwound a previous extension in the decision path*/
static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index);
/*! \brief Number of max leaves*/
int max_leaves_;
/*! \brief Number of current leaves*/
int num_leaves_;
// following values used for non-leaf node
/*! \brief A non-leaf node's left child (leaves stored as ~leaf_index) */
std::vector<int> left_child_;
/*! \brief A non-leaf node's right child (leaves stored as ~leaf_index) */
std::vector<int> right_child_;
/*! \brief A non-leaf node's split feature */
std::vector<int> split_feature_inner_;
/*! \brief A non-leaf node's split feature, the original index */
std::vector<int> split_feature_;
/*! \brief A non-leaf node's split threshold in bin */
std::vector<uint32_t> threshold_in_bin_;
/*! \brief A non-leaf node's split threshold in feature value */
std::vector<double> threshold_;
// Count of categorical splits; > 0 enables the categorical decision path.
int num_cat_;
std::vector<int> cat_boundaries_inner_;
std::vector<uint32_t> cat_threshold_inner_;
std::vector<int> cat_boundaries_;
std::vector<uint32_t> cat_threshold_;
/*! \brief Store the information for categorical feature handle and missing value handle. */
std::vector<int8_t> decision_type_;
/*! \brief A non-leaf node's split gain */
std::vector<float> split_gain_;
// used for leaf node
/*! \brief The parent of leaf */
std::vector<int> leaf_parent_;
/*! \brief Output of leaves */
std::vector<double> leaf_value_;
/*! \brief DataCount of leaves */
std::vector<int> leaf_count_;
/*! \brief Output of non-leaf nodes */
std::vector<double> internal_value_;
/*! \brief DataCount of non-leaf nodes */
std::vector<int> internal_count_;
/*! \brief Depth for leaves */
std::vector<int> leaf_depth_;
double shrinkage_;
int max_depth_;
};
// Turn leaf `leaf` into internal node (num_leaves_ - 1) with two children:
// the original leaf (reused as the left child) and a brand-new leaf at index
// num_leaves_. Child slots store leaves bitwise-complemented (~id) to
// distinguish them from internal node ids.
// NOTE(review): num_leaves_ is not incremented here although index
// num_leaves_ is written - presumably the caller bumps it; confirm.
inline void Tree::Split(int leaf, int feature, int real_feature,
double left_value, double right_value, int left_cnt, int right_cnt, float gain) {
int new_node_idx = num_leaves_ - 1;
// update parent info
int parent = leaf_parent_[leaf];
if (parent >= 0) {
// if cur node is left child
if (left_child_[parent] == ~leaf) {
left_child_[parent] = new_node_idx;
} else {
right_child_[parent] = new_node_idx;
}
}
// add new node
split_feature_inner_[new_node_idx] = feature;
split_feature_[new_node_idx] = real_feature;
split_gain_[new_node_idx] = Common::AvoidInf(gain);
// add two new leaves
left_child_[new_node_idx] = ~leaf;
right_child_[new_node_idx] = ~num_leaves_;
// update new leaves
leaf_parent_[leaf] = new_node_idx;
leaf_parent_[num_leaves_] = new_node_idx;
// save current leaf value to internal node before change
internal_value_[new_node_idx] = leaf_value_[leaf];
internal_count_[new_node_idx] = left_cnt + right_cnt;
// NaN leaf outputs are clamped to zero.
leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value;
leaf_count_[leaf] = left_cnt;
leaf_value_[num_leaves_] = std::isnan(right_value) ? 0.0f : right_value;
leaf_count_[num_leaves_] = right_cnt;
// update leaf depth
leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1;
leaf_depth_[leaf]++;
}
// Predict one record: traverse to a leaf and return its output.
inline double Tree::Predict(const double* feature_values) const {
  // A tree without splits is a constant predictor.
  if (num_leaves_ <= 1) {
    return leaf_value_[0];
  }
  return LeafOutput(GetLeaf(feature_values));
}
// Predict one record given as a sparse feature map.
inline double Tree::PredictByMap(const std::unordered_map<int, double>& feature_values) const {
  // A tree without splits is a constant predictor.
  if (num_leaves_ <= 1) {
    return leaf_value_[0];
  }
  return LeafOutput(GetLeafByMap(feature_values));
}
// Return the index of the leaf this record falls into (0 for a stump).
inline int Tree::PredictLeafIndex(const double* feature_values) const {
  return (num_leaves_ > 1) ? GetLeaf(feature_values) : 0;
}
// Sparse-map variant of PredictLeafIndex (0 for a stump).
inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const {
  return (num_leaves_ > 1) ? GetLeafByMap(feature_values) : 0;
}
// Accumulate SHAP feature contributions for one record into `output`,
// which has num_features + 1 slots; the extra last slot collects the
// tree's expected value (the SHAP base value).
inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) {
output[num_features] += ExpectedValue();
// Run the recursion with preallocated space for the unique path data
if (num_leaves_ > 1) {
CHECK(max_depth_ >= 0);
// A path of length L needs L*(L+1)/2 PathElement slots in total.
const int max_path_len = max_depth_ + 1;
std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2);
TreeSHAP(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1);
}
}
// Rebuild leaf_depth_ by walking the tree from the root (node 0).
inline void Tree::RecomputeLeafDepths(int node, int depth) {
  if (node == 0) leaf_depth_.resize(num_leaves());
  if (node >= 0) {
    // Internal node: descend into both subtrees.
    RecomputeLeafDepths(left_child_[node], depth + 1);
    RecomputeLeafDepths(right_child_[node], depth + 1);
  } else {
    // Leaf: ~node recovers the leaf index from its encoded form.
    leaf_depth_[~node] = depth;
  }
}
// Walk from the root to a leaf; the categorical-free path avoids the
// per-node dispatch. Leaves come back bitwise-complemented, hence ~node.
inline int Tree::GetLeaf(const double* feature_values) const {
  int node = 0;
  if (num_cat_ > 0) {
    do {
      node = Decision(feature_values[split_feature_[node]], node);
    } while (node >= 0);
  } else {
    do {
      node = NumericalDecision(feature_values[split_feature_[node]], node);
    } while (node >= 0);
  }
  return ~node;
}
// Sparse-map variant of GetLeaf; absent features are treated as 0.0f,
// matching the dense path's handling of zeros.
inline int Tree::GetLeafByMap(const std::unordered_map<int, double>& feature_values) const {
  int node = 0;
  if (num_cat_ > 0) {
    do {
      const auto it = feature_values.find(split_feature_[node]);
      node = Decision(it != feature_values.end() ? it->second : 0.0f, node);
    } while (node >= 0);
  } else {
    do {
      const auto it = feature_values.find(split_feature_[node]);
      node = NumericalDecision(it != feature_values.end() ? it->second : 0.0f, node);
    } while (node >= 0);
  }
  return ~node;
}
} // namespace LightGBM
#endif // LightGBM_TREE_H_
|
cholesky_gpu.c | /*
This program performs cholesky decomposition on the GPU with
dynamically allocated matrices.
Author: Gleison Souza Diniz Mendonça
Date: 04-01-2015
version 2.0
Run:
ipmacc cholesky_gpu.c -o cholesky
./cholesky matrix-size
*/
#include "BenchmarksUtil.h"
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#ifdef RUN_TEST
#define SIZE 1100
#elif RUN_BENCHMARK
#define SIZE 9600
#else
#define SIZE 1000
#endif
#define ERROR_THRESHOLD 0.05
// Initialize matrices.
// Fill A with a deterministic test pattern and zero both result buffers.
void init_arrays(float *A, float *B_GPU, float *B_CPU) {
  const int q = SIZE * SIZE;
  for (int i = 0; i < SIZE; ++i) {
    for (int j = 0; j < SIZE; ++j) {
      const int idx = i * SIZE + j;
      A[idx] = (float)(q - (10 * i) - (5 * j));
      B_GPU[idx] = 0.0f;
      B_CPU[idx] = 0.0f;
    }
  }
}
/// Cholesky algorithm GPU
/// s = size of matrix
/// Cholesky algorithm GPU
/// s = size of matrix
///
/// Lower-triangular factorization B of A, offloaded with OpenMP target.
/// NOTE(review): the loop is parallelized over rows i, but row i reads
/// B entries produced by earlier rows. The `k--` / `j--` branches re-run
/// the current iteration whenever a needed B entry is still 0.0f - this
/// looks like a busy-wait on cross-thread dependencies. It relies on 0.0f
/// meaning "not yet written" (a legitimately zero factor entry would spin
/// forever) and on the device making other threads' writes visible;
/// confirm this is the intended synchronization scheme.
void cholesky_GPU(float *A, float *B) {
#pragma omp target map(to : A[0 : SIZE *SIZE]) map(tofrom : B[0 : SIZE *SIZE]) \
device(DEVICE_ID)
{
#pragma omp parallel for collapse(1)
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j <= i; j++) {
// t accumulates the dot product of rows i and j (columns < j).
float t = 0.0f;
for (int k = 0; k < j; k++) {
if (B[i * SIZE + k] != 0.0f && B[j * SIZE + k] != 0.0f) {
t += B[i * SIZE + k] * B[j * SIZE + k];
} else {
// Retry this k until the dependency has been produced.
k--;
}
}
if (i == j) {
// Diagonal entry.
B[i * SIZE + j] = sqrt((A[i * SIZE + i] - t));
} else {
if (B[j * SIZE + j] != 0.0f) {
B[i * SIZE + j] = (1.0 / B[j * SIZE + j] * (A[i * SIZE + j] - t));
} else {
// Diagonal of row j not ready yet; retry this j.
j--;
}
}
}
}
}
}
/* Sequential reference Cholesky: computes the lower-triangular factor of
   the SIZE x SIZE matrix A into B (row-major, flattened). */
void cholesky_CPU(float *A, float *B) {
  int row, col, k;
  for (row = 0; row < SIZE; row++) {
    for (col = 0; col <= row; col++) {
      /* Dot product of the already-computed prefixes of rows row/col. */
      float acc = 0.0f;
      for (k = 0; k < col; k++)
        acc += B[row * SIZE + k] * B[col * SIZE + k];
      if (row == col)
        B[row * SIZE + col] = sqrt((A[row * SIZE + row] - acc));
      else
        B[row * SIZE + col] = (1.0 / B[col * SIZE + col] * (A[row * SIZE + col] - acc));
    }
  }
}
/* Count the entries whose CPU/GPU percent difference exceeds
   ERROR_THRESHOLD, report the count, and return it. */
int compareResults(float *E, float *E_GPU) {
  int mismatches = 0;
  for (int i = 0; i < SIZE; i++) {
    for (int j = 0; j < SIZE; j++) {
      const int idx = i * SIZE + j;
      if (percentDiff(E[idx], E_GPU[idx]) > ERROR_THRESHOLD)
        mismatches++;
    }
  }
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         ERROR_THRESHOLD, mismatches);
  return mismatches;
}
/* Driver: allocate the matrices, run the GPU factorization (and, under
   RUN_TEST, the CPU reference plus a comparison), and return the number
   of mismatching entries as the exit code (0 on success). */
int main(int argc, char *argv[]) {
  double t_start, t_end;
  float *A, *B_CPU, *B_GPU;
  int fail = 0;
  A = (float *)malloc(SIZE * SIZE * sizeof(float));
  B_CPU = (float *)malloc(SIZE * SIZE * sizeof(float));
  B_GPU = (float *)malloc(SIZE * SIZE * sizeof(float));
  /* SIZE may be as large as 9600, so each buffer is ~350 MB; fail
     cleanly instead of dereferencing NULL. */
  if (A == NULL || B_CPU == NULL || B_GPU == NULL) {
    fprintf(stderr, "Error: matrix allocation failed\n");
    free(A);
    free(B_CPU);
    free(B_GPU);
    return 1;
  }
  fprintf(stdout, "<< Cholesky >>\n");
  /* Arguments in parameter order (A, B_GPU, B_CPU); both B buffers are
     zero-initialized, so this also matches the old swapped call. */
  init_arrays(A, B_GPU, B_CPU);
  t_start = rtclock();
  cholesky_GPU(A, B_GPU);
  t_end = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
#ifdef RUN_TEST
  t_start = rtclock();
  cholesky_CPU(A, B_CPU);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
  fail = compareResults(B_CPU, B_GPU);
#endif
  free(A);
  free(B_CPU);
  free(B_GPU);
  return fail;
}
|
weights.c | #include <stdio.h>
#include <stdint.h>
#include <omp.h>
#include <math.h>
#include "geometry.h"
#include "allocator.h"
#include "mesh2geo.h"
/*
 * Assemble the per-edge weighted least-squares terms from the per-node
 * coefficient table `w` (7 doubles per node, produced earlier in
 * wmalloc).  For every edge i the x/y/z terms are written into terms0
 * (as seen from the edge's first endpoint) and terms1 (from the second,
 * with the edge vector sign flipped).
 *
 * Parallelization: each OpenMP thread walks its own contiguous edge
 * range ie[t]..ie[t+1] and only writes entries whose endpoint it owns
 * (part[node] == t), so the stores are race-free without locking.
 */
static inline void
compute_terms(
    const struct geometry *g,
    const double *w,
    struct xyz *terms0,
    struct xyz *terms1)
{
  const uint32_t *ie = g->s->i;        /* per-thread edge range bounds */
  const uint32_t *part = g->n->part;   /* owning thread of each node */
  const uint32_t *n0 = g->e->eptr->n0; /* first endpoint of each edge */
  const uint32_t *n1 = g->e->eptr->n1; /* second endpoint of each edge */
  const double *x0 = g->n->xyz->x0;    /* node coordinates */
  const double *x1 = g->n->xyz->x1;
  const double *x2 = g->n->xyz->x2;
#pragma omp parallel
  {
    const uint32_t t = (uint32_t) omp_get_thread_num();
    const uint32_t ie0 = ie[t];
    const uint32_t ie1 = ie[t+1];
    uint32_t i;
    for(i = ie0; i < ie1; i++)
    {
      const uint32_t node0 = n0[i];
      const uint32_t node1 = n1[i];
      const double coordx0 = x0[node0];
      const double coordy0 = x1[node0];
      const double coordz0 = x2[node0];
      const double coordx1 = x0[node1];
      const double coordy1 = x1[node1];
      const double coordz1 = x2[node1];
      /* Edge vector from node0 to node1. */
      const double dx = coordx1 - coordx0;
      const double dy = coordy1 - coordy0;
      const double dz = coordz1 - coordz0;
      double c0;
      double c1;
      double termx;
      double termy;
      double termz;
      if(part[node0] == t)
      {
        /* Back-substitution through node0's factored coefficients
           (w[..1], w[..2], w[..4] are the off-diagonal ratios;
           w[..0], w[..3], w[..5], w[..6] the scaling terms). */
        c0 = - dx * w[node0 * 7 + 1] + dy;
        c1 = - dx * w[node0 * 7 + 2] + dz;
        c1 = - w[node0 * 7 + 4] * c0 + c1;
        termx = w[node0 * 7 + 3] * w[node0 * 7 + 1] * c0;
        termx = dx * w[node0 * 7 + 0] - termx;
        termx += w[node0 * 7 + 6] * c1;
        termy = w[node0 * 7 + 4] * w[node0 * 7 + 5] * c1;
        termy = w[node0 * 7 + 3] * c0 - termy;
        termz = w[node0 * 7 + 5] * c1;
        terms0->x0[i] = termx;
        terms0->x1[i] = termy;
        terms0->x2[i] = termz;
      }
      if(part[node1] == t)
      {
        /* Same computation from node1's side: the edge vector enters
           with opposite sign. */
        c0 = dx * w[node1 * 7 + 1] - dy;
        c1 = dx * w[node1 * 7 + 2] - dz;
        c1 = - w[node1 * 7 + 4] * c0 + c1;
        termx = w[node1 * 7 + 3] * w[node1 * 7 + 1] * c0;
        termx = -dx * w[node1 * 7 + 0] - termx;
        termx += w[node1 * 7 + 6] * c1;
        termy = w[node1 * 7 + 4] * w[node1 * 7 + 5] * c1;
        termy = w[node1 * 7 + 3] * c0 - termy;
        termz = w[node1 * 7 + 5] * c1;
        terms1->x0[i] = termx;
        terms1->x1[i] = termy;
        terms1->x2[i] = termz;
      }
    }
  }
}
/* Accumulate w22 (slot w[node*7 + 5]): the squared residual of dz after
   removing the dx and (orthogonalized) dy components, using the ratios
   already stored in slots 1, 2 and 4 by the earlier passes. */
static inline void
w2alloc(const struct geometry *g, double *w)
{
  const uint32_t *ie = g->s->i;      /* per-thread edge range bounds */
  const uint32_t *part = g->n->part; /* owning thread of each node */
  const uint32_t *n0 = g->e->eptr->n0;
  const uint32_t *n1 = g->e->eptr->n1;
  const double *x0 = g->n->xyz->x0;
  const double *x1 = g->n->xyz->x1;
  const double *x2 = g->n->xyz->x2;
#pragma omp parallel
  {
    /* Each thread updates only the nodes it owns, so no locking. */
    const uint32_t t = (uint32_t) omp_get_thread_num();
    const uint32_t ie0 = ie[t];
    const uint32_t ie1 = ie[t+1];
    uint32_t i;
    for(i = ie0; i < ie1; i++)
    {
      const uint32_t node0 = n0[i];
      const uint32_t node1 = n1[i];
      const double coordx0 = x0[node0];
      const double coordy0 = x1[node0];
      const double coordz0 = x2[node0];
      const double coordx1 = x0[node1];
      const double coordy1 = x1[node1];
      const double coordz1 = x2[node1];
      /* Edge vector from node0 to node1. */
      const double dx = coordx1 - coordx0;
      const double dy = coordy1 - coordy0;
      const double dz = coordz1 - coordz0;
      if(part[node0] == t)
      {
        /* Orthogonalize dy and dz against dx, then dz' against dy'. */
        const double d0 = w[node0 * 7 + 1] / w[node0 * 7 + 0];
        const double d0_ = dy - dx * d0;
        const double d1 = w[node0 * 7 + 2] / w[node0 * 7 + 0];
        const double d1_ = dz - dx * d1;
        const double d2 = w[node0 * 7 + 4] / w[node0 * 7 + 3];
        const double d2_ = d1_ - d2 * d0_;
        w[node0 * 7 + 5] += d2_ * d2_;
      }
      if(part[node1] == t)
      {
        /* Same residual seen from node1: the edge signs flip, but the
           squared result is identical in form. */
        const double d0 = w[node1 * 7 + 1] / w[node1 * 7 + 0];
        const double d0_ = -dy + dx * d0;
        const double d1 = w[node1 * 7 + 2] / w[node1 * 7 + 0];
        const double d1_ = -dz + dx * d1;
        const double d2 = w[node1 * 7 + 4] / w[node1 * 7 + 3];
        const double d2_ = d1_ - d2 * d0_;
        w[node1 * 7 + 5] += d2_ * d2_;
      }
    }
  }
}
/* Accumulate w11 (slot w[node*7 + 3]) and w12 (slot w[node*7 + 4]):
   the squared dy-residual after removing the dx component, and its
   cross term with dz, using the ratio w01/w00 stored in slot 1. */
static inline void
w1alloc(const struct geometry *g, double *w)
{
  const uint32_t *ie = g->s->i;      /* per-thread edge range bounds */
  const uint32_t *part = g->n->part; /* owning thread of each node */
  const uint32_t *n0 = g->e->eptr->n0;
  const uint32_t *n1 = g->e->eptr->n1;
  const double *x0 = g->n->xyz->x0;
  const double *x1 = g->n->xyz->x1;
  const double *x2 = g->n->xyz->x2;
#pragma omp parallel
  {
    /* Each thread updates only the nodes it owns, so no locking. */
    const uint32_t t = (uint32_t) omp_get_thread_num();
    const uint32_t ie0 = ie[t];
    const uint32_t ie1 = ie[t+1];
    uint32_t i;
    for(i = ie0; i < ie1; i++)
    {
      const uint32_t node0 = n0[i];
      const uint32_t node1 = n1[i];
      const double coordx0 = x0[node0];
      const double coordy0 = x1[node0];
      const double coordz0 = x2[node0];
      const double coordx1 = x0[node1];
      const double coordy1 = x1[node1];
      const double coordz1 = x2[node1];
      /* Compute the difference of each coordinate component */
      const double dx = coordx1 - coordx0;
      const double dy = coordy1 - coordy0;
      const double dz = coordz1 - coordz0;
      if(part[node0] == t)
      {
        /* d_ = dy orthogonalized against the dx direction. */
        const double d = w[node0 * 7 + 1] / w[node0 * 7 + 0];
        const double d_ = dy - dx * d;
        w[node0 * 7 + 3] += d_ * d_;
        w[node0 * 7 + 4] += d_ * dz;
      }
      if(part[node1] == t)
      {
        /* Same update from the opposite edge orientation; the cross
           term picks up the sign flip via -= . */
        const double d = w[node1 * 7 + 1] / w[node1 * 7 + 0];
        const double d_ = -dy + dx * d;
        w[node1 * 7 + 3] += d_ * d_;
        w[node1 * 7 + 4] -= d_ * dz;
      }
    }
  }
}
/*
 * Accumulate the first three sums for every node: w00 += dx*dx,
 * w01 += dx*dy, w02 += dx*dz over all incident edges.
 *
 * Each OpenMP thread walks its own contiguous edge range
 * (ie[t]..ie[t+1]) and updates only the endpoints it owns
 * (part[node] == t), so the accumulation is race-free.  Both endpoints
 * of an edge receive identical contributions: the sign of the edge
 * vector flips, but the products are sign-symmetric
 * ((-dx)*(-dy) == dx*dy exactly in IEEE arithmetic).
 */
static inline void
w0alloc(const struct geometry *g, double *w)
{
  const uint32_t *edges = g->s->i;
  const uint32_t *part = g->n->part;
  const uint32_t *n0 = g->e->eptr->n0;
  const uint32_t *n1 = g->e->eptr->n1;
  const double *x0 = g->n->xyz->x0;
  const double *x1 = g->n->xyz->x1;
  const double *x2 = g->n->xyz->x2;
#pragma omp parallel
  {
    const uint32_t t = (uint32_t) omp_get_thread_num();
    uint32_t i;
    for(i = edges[t]; i < edges[t+1]; i++)
    {
      const uint32_t a = n0[i];
      const uint32_t b = n1[i];
      /* Edge vector from endpoint a to endpoint b. */
      const double dx = x0[b] - x0[a];
      const double dy = x1[b] - x1[a];
      const double dz = x2[b] - x2[a];
      const double xx = dx * dx;
      const double xy = dx * dy;
      const double xz = dx * dz;
      if(part[a] == t) /* left endpoint owned by this thread */
      {
        w[a * 7 + 0] += xx;
        w[a * 7 + 1] += xy;
        w[a * 7 + 2] += xz;
      }
      if(part[b] == t) /* right endpoint owned by this thread */
      {
        w[b * 7 + 0] += xx;
        w[b * 7 + 1] += xy;
        w[b * 7 + 2] += xz;
      }
    }
  }
}
/*
 * Build the edge weight structure for the geometry: allocate a 7-double
 * scratch table per node, fill it in three accumulation passes
 * (w0alloc/w1alloc/w2alloc) with normalization steps in between, then
 * convert the table into per-edge x/y/z terms (compute_terms) and hang
 * the result on g->e->w.  The scratch table is freed before returning.
 */
void
wmalloc(struct geometry *g)
{
  size_t nnodes = g->n->sz;
  /* 7 coefficients per node, zero-initialized. */
  double *w = (double *) fun3d_calloc(7 * nnodes, sizeof(double));
  uint32_t i;
  /* Do w00, w01, and w02 */
  w0alloc(g, w);
  /* Compute ||x|| (norm) and divide the other by
  the computed norm */
#pragma omp parallel for
  for(i = 0; i < nnodes; i++)
  {
    /* Slot 0 becomes the norm; slots 1 and 2 become ratios w01/w00,
       w02/w00 (note: uses the already-updated slot 0). */
    w[i * 7 + 0] = sqrt(w[i * 7 + 0]);
    w[i * 7 + 1] /= w[i * 7 + 0];
    w[i * 7 + 2] /= w[i * 7 + 0];
  }
  /* Do w11 and w12 */
  w1alloc(g, w);
#pragma omp parallel for
  for(i = 0; i < nnodes; i++)
  {
    /* Same normalization for the second column. */
    w[i * 7 + 3] = sqrt(w[i * 7 + 3]);
    w[i * 7 + 4] /= w[i * 7 + 3];
  }
  /* Do w22 */
  w2alloc(g, w);
  /* Update the magnitudes. Stuffs contributed by Dinesh 1998 */
#pragma omp parallel for
  for(i = 0; i < nnodes; i++)
  {
    /* Convert the accumulated sums into the final coefficient set:
       inverse squared norms (w00, w11, w22), the off-diagonal ratios
       (w01, w02, w12), and the combined term w33. */
    w[i * 7 + 5] = sqrt(w[i * 7 + 5]);
    double sw00 = w[i * 7 + 0] * w[i * 7 + 0];
    double sw11 = w[i * 7 + 3] * w[i * 7 + 3];
    double sw22 = w[i * 7 + 5] * w[i * 7 + 5];
    double w00 = 1.f / sw00;
    double w11 = 1.f / sw11;
    double w22 = 1.f / sw22;
    double w01 = w[i * 7 + 1] / w[i * 7 + 0];
    double w02 = w[i * 7 + 2] / w[i * 7 + 0];
    double w12 = w[i * 7 + 4] / w[i * 7 + 3];
    double m0 = w[i * 7 + 1] * w[i * 7 + 4];
    m0 -= w[i * 7 + 2] * w[i * 7 + 3];
    double m1 = w[i * 7 + 0] * w[i * 7 + 3] * sw22;
    double w33 = m0 / m1;
    w[i * 7 + 0] = w00;
    w[i * 7 + 3] = w11;
    w[i * 7 + 5] = w22;
    w[i * 7 + 1] = w01;
    w[i * 7 + 2] = w02;
    w[i * 7 + 4] = w12;
    w[i * 7 + 6] = w33;
  }
  size_t nedges = g->e->sz;
  /* Per-edge term arrays, one xyz triple per direction of each edge. */
  struct xyz *terms0 = (struct xyz *) fun3d_malloc(1, sizeof(struct xyz));
  double *wtermsx0 = (double *) fun3d_calloc(nedges, sizeof(double));
  double *wtermsy0 = (double *) fun3d_calloc(nedges, sizeof(double));
  double *wtermsz0 = (double *) fun3d_calloc(nedges, sizeof(double));
  terms0->x0 = wtermsx0;
  terms0->x1 = wtermsy0;
  terms0->x2 = wtermsz0;
  struct xyz *terms1 = (struct xyz *) fun3d_malloc(1, sizeof(struct xyz));
  double *wtermsx1 = (double *) fun3d_calloc(nedges, sizeof(double));
  double *wtermsy1 = (double *) fun3d_calloc(nedges, sizeof(double));
  double *wtermsz1 = (double *) fun3d_calloc(nedges, sizeof(double));
  terms1->x0 = wtermsx1;
  terms1->x1 = wtermsy1;
  terms1->x2 = wtermsz1;
  compute_terms(g, w, terms0, terms1);
  /* The node table is only needed to derive the edge terms. */
  fun3d_free(w);
  struct weights *weights = (struct weights *) fun3d_malloc(1, sizeof(struct weights));
  weights->w0 = terms0;
  weights->w1 = terms1;
  g->e->w = weights;
}
tree.h | #pragma once
// Octree over TreePtcl particles, built by sorting particles along a
// Z-order (Morton) curve and splitting nodes top-down until each holds
// at most N_LEAF particles.  Provides neighbour-list construction and
// (declared) self-gravity evaluation.
template <class TreePtcl> class Tree{
  // Maximum Morton-grid refinement depth (keys use 3*MAX_LEVEL = 60 bits).
  static constexpr std::size_t MAX_LEVEL = 20;
  // Nodes holding at most this many particles are not subdivided.
  static constexpr std::size_t N_LEAF = 16;
  // Safety factor on kernel support radii in the overlap test.
  // NOTE: in-class initializers of non-integral static members require
  // constexpr; plain `static const double` here was a compiler extension.
  static constexpr double EXPAND = 1.0;
  static constexpr double MAC = 0.5; // Multipole Acceptance Criterion
  typedef unsigned long long int ulli;
  // One tree cell: bounding box, mass moments, and child bookkeeping.
  struct node_t{
    vec3<double> mass_center, min, max;
    //mat33<double> Quad;
    double mass, size, radius, dmax;
    // head_ptcl/Nptcl: contiguous slice of ptcl_ptr owned by this cell.
    // more: index of this cell's first child in `node` (0 = none yet).
    std::size_t Nptcl, head_ptcl, more;
    unsigned short int Nbranch;
    unsigned int level;
    node_t(){
      Nptcl = 0;
      mass_center = 0;
      mass = 0;
    }
    ~node_t(){
    }
    inline bool isEmpty() const{
      return Nptcl == 0;
    }
    // Leaf test: a cell with at most N particles is treated as a leaf.
    inline bool isLeaf(const unsigned int N) const{
      return Nptcl <= N;
    }
    inline const node_t* const getPointer() const{
      return this;
    }
    inline node_t* getPointer(){
      return this;
    }
  };
  // A particle reference tagged with its Morton key; sorted by key.
  struct ptcl_ptr_t{
    ulli key;
    const TreePtcl* ptr;
    bool operator < (const ptcl_ptr_t& right) const{
      return key < right.key;
    }
    ptcl_ptr_t(const ulli _key, const TreePtcl* const _ptr) : key(_key), ptr(_ptr){
    }
  };
  //member variables;
  std::vector<node_t> node;            // node[0] is the root
  std::vector<ptcl_ptr_t> ptcl_ptr;    // Morton-sorted particle refs
  //member functions;
  // Build the root cell: the bounding box of all particles, expanded by
  // each particle's kernel support (kernel.width * h) on every side.
  node_t createRoot(const std::vector<TreePtcl >& ptcl, const kernel_t<double>& kernel){
    node_t root;
    root.min = + 1.0e+30;
    root.max = - 1.0e+30;
    root.mass_center = 0;
    root.mass = 0;
    root.Nptcl = ptcl.size();
    root.head_ptcl = 0;
    root.level = 0;
    root.radius = 0;
    root.dmax = 0;
    for(std::size_t i = 0 ; i < ptcl.size() ; ++ i){
      root.min.x = math::min(root.min.x, ptcl[i].r.x - kernel.width * ptcl[i].h);
      root.min.y = math::min(root.min.y, ptcl[i].r.y - kernel.width * ptcl[i].h);
      root.min.z = math::min(root.min.z, ptcl[i].r.z - kernel.width * ptcl[i].h);
      root.max.x = math::max(root.max.x, ptcl[i].r.x + kernel.width * ptcl[i].h);
      root.max.y = math::max(root.max.y, ptcl[i].r.y + kernel.width * ptcl[i].h);
      root.max.z = math::max(root.max.z, ptcl[i].r.z + kernel.width * ptcl[i].h);
    }
    // The root is a cube: use the largest box extent as its side length.
    root.size = math::max(math::max((root.max - root.min).y, (root.max - root.min).z), (root.max - root.min).x);
    return root;
  }
  // key maker: interleave the bits of x so they occupy every third bit,
  // the building block of a 3D Morton key.
  inline ulli SpreadBits(ulli x) const{
    x = (x | (x << 32)) & 0x7fff00000000ffff; // 0b0111111111111111000000000000000000000000000000001111111111111111
    x = (x | (x << 16)) & 0x00ff0000ff0000ff; // 0b0000000011111111000000000000000011111111000000000000000011111111
    x = (x | (x << 8)) & 0x700f00f00f00f00f; // 0b0111000000001111000000001111000000001111000000001111000000001111
    x = (x | (x << 4)) & 0x30c30c30c30c30c3; // 0b0011000011000011000011000011000011000011000011000011000011000011
    x = (x | (x << 2)) & 0x1249249249249249; // 0b0001001001001001001001001001001001001001001001001001001001001001
    return x;
  }
  // Full 3D Morton (Z-order) key from integer grid coordinates.
  inline ulli zorder3d(const ulli x, const ulli y, const ulli z) const{
    return (SpreadBits(x) | (SpreadBits(y) << 1) | (SpreadBits(z) << 2));
  }
  // Octant (0..7) a key falls into at the given tree level.
  inline unsigned int GetBranchId(const ulli key, const unsigned int level) const{
    return key >> (3 * MAX_LEVEL - 3 * level) & 0x7; // 0b111
  }
  // True when the particle's (expanded) kernel support may intersect the
  // cell, under periodic boundary wrapping.
  inline bool isOverwrapping(const TreePtcl& ptcl, const kernel_t<double>& kernel, const node_t& node, const boundary_t<double>& boundary) const{
    return !(abs(boundary.Periodic(ptcl.r - node.mass_center)) > math::max(node.radius, node.dmax + EXPAND * kernel.width * ptcl.h));
  }
  // Multipole acceptance test: true when the cell must be opened.
  inline bool isClose(const TreePtcl& ptcl, const node_t& node) const{
    return abs(ptcl.r - node.mass_center) * MAC < node.size;
  }
  //public member functions
  public:
  Tree(){
    node.clear();
    ptcl_ptr.clear();
  }
  // Build the tree structure for the given particle set: create the
  // root, Morton-sort particle references, then subdivide breadth-first.
  void Plant(const std::vector<TreePtcl >& ptcl, const kernel_t<double>& kernel){
    //Clear;
    node.clear();
    ptcl_ptr.clear();
    ptcl_ptr.reserve(ptcl.size());
    //Set root domain;
    node.push_back(createRoot(ptcl, kernel));
    //Create particle pointer
    const ulli grid_size = 1 << MAX_LEVEL;
    for(std::size_t i = 0 ; i < ptcl.size() ; ++ i){
      // Map each position into the [0, grid_size) integer grid spanned
      // by the root box, then interleave into a Morton key.
      struct{
        ulli x, y, z;
      } grid_address;
      grid_address.x = static_cast<ulli>((ptcl[i].r.x - node[0].min.x) / (node[0].max.x - node[0].min.x) * static_cast<double>(grid_size));
      grid_address.y = static_cast<ulli>((ptcl[i].r.y - node[0].min.y) / (node[0].max.y - node[0].min.y) * static_cast<double>(grid_size));
      grid_address.z = static_cast<ulli>((ptcl[i].r.z - node[0].min.z) / (node[0].max.z - node[0].min.z) * static_cast<double>(grid_size));
      const ulli zkey = zorder3d(grid_address.x, grid_address.y, grid_address.z);
      const TreePtcl* const ptr = ptcl[i].getPointer();
      ptcl_ptr.push_back(ptcl_ptr_t(zkey, ptr));
    }
    std::sort(ptcl_ptr.begin(), ptcl_ptr.end());
    //__gnu_parallel::sort(ptcl_ptr.begin(), ptcl_ptr.end());
    //Create tree structure; node grows while we iterate, giving a
    //breadth-first subdivision until every cell is a leaf.
    for(std::size_t n = 0 ; n < node.size() ; ++ n){
      if(node[n].isLeaf(N_LEAF) == false){
        node[n].more = node.size();//first child index
        //have a baby nodes
        node_t child[8];
        for(short unsigned int c = 0 ; c < 8 ; ++ c){
          child[c].size = 0.5 * node[n].size;
          child[c].level = node[n].level + 1;
          child[c].radius = 0.0;
          child[c].dmax = 0.0;
          child[c].Nbranch = 0;
          child[c].Nptcl = 0;
          child[c].more = 0;//NULL
        }
        // Count the particles falling into each octant; the Morton sort
        // guarantees each octant's particles are contiguous.
        for(std::size_t i = node[n].head_ptcl ; i < node[n].head_ptcl + node[n].Nptcl ; ++ i){
          const std::size_t c = GetBranchId(ptcl_ptr[i].key, node[n].level + 1);
          ++ child[c].Nptcl;
        }
        child[0].head_ptcl = node[n].head_ptcl;
        for(std::size_t c = 1 ; c < 8 ; ++ c){
          child[c].head_ptcl = child[c-1].head_ptcl + child[c-1].Nptcl;
        }
        node[n].Nbranch = 0;
        for(std::size_t c = 0 ; c < 8 ; ++ c){
          if(__builtin_expect(child[c].isEmpty() == true, 0)) continue;
          ++ node[n].Nbranch;
          node.push_back(child[c]);
        }
      }
    }
  }
  // Fill in each cell's mass, centre of mass, and enclosing radii.
  void setCellProperties(const kernel_t<double>& kernel){
    //Create bounding box;
    #if 0 //Top Down;
    for(vector<node_t>::iterator cell = node.begin() ; cell != node.end() ; ++ cell){
      cell->mass_center = 0;
      cell->mass = 0;
      cell->radius = 0;
      cell->dmax = 0;
      for(std::size_t i = cell->head_ptcl ; i < cell->head_ptcl + cell->Nptcl ; ++ i){
        cell->mass_center += ptcl_ptr[i].ptr->r * ptcl_ptr[i].ptr->Q.mass;
        cell->mass += ptcl_ptr[i].ptr->Q.mass;
      }
      cell->mass_center /= cell->mass;
      for(std::size_t i = cell->head_ptcl ; i < cell->head_ptcl + cell->Nptcl ; ++ i){
        cell->radius = math::max(cell->radius, abs(cell->mass_center - ptcl_ptr[i].ptr->r) + EXPAND * kernel.width * ptcl_ptr[i].ptr->h);
        cell->dmax = math::max(cell->dmax , abs(cell->mass_center - ptcl_ptr[i].ptr->r));
      }
    }
    #else //Bottom Up: children appear after their parent in `node`, so a
    //reverse sweep sees every child before its parent.
    for(typename std::vector<node_t>::reverse_iterator cell = node.rbegin() ; cell != node.rend() ; ++ cell){
      cell->mass_center = 0;
      cell->mass = 0;
      cell->radius = 0;
      cell->dmax = 0;
      if(__builtin_expect(cell->isLeaf(N_LEAF) == true, 0)){
        // Leaf: accumulate directly over its particles.
        for(std::size_t i = cell->head_ptcl ; i < cell->head_ptcl + cell->Nptcl ; ++ i){
          cell->mass_center += ptcl_ptr[i].ptr->r * ptcl_ptr[i].ptr->mass;
          cell->mass += ptcl_ptr[i].ptr->mass;
        }
        cell->mass_center /= cell->mass;
        for(std::size_t i = cell->head_ptcl ; i < cell->head_ptcl + cell->Nptcl ; ++ i){
          const double dis = abs(cell->mass_center - ptcl_ptr[i].ptr->r);
          cell->radius = math::max(cell->radius, dis + EXPAND * kernel.width * ptcl_ptr[i].ptr->h);
          cell->dmax = math::max(cell->dmax , dis);
        }
      }else{
        // Internal cell: combine the already-computed children.
        for(std::size_t b = cell->more ; b < cell->more + cell->Nbranch ; ++ b){
          cell->mass_center += node[b].mass * node[b].mass_center;
          cell->mass += node[b].mass;
        }
        cell->mass_center /= cell->mass;
        for(std::size_t b = cell->more ; b < cell->more + cell->Nbranch ; ++ b){
          const double dis = abs(cell->mass_center - node[b].mass_center);
          cell->radius = math::max(cell->radius, dis + node[b].radius);
          cell->dmax = math::max(cell->dmax , dis + node[b].dmax );
        }
      }
    }
    #endif
  }
  // Rebuild each particle's neighbour list by a stack-based tree walk;
  // parallel over particles (each writes only its own ngb vector).
  void setNeighbourList(std::vector<TreePtcl >& ptcl, const kernel_t<double>& kernel, const boundary_t<double>& boundary){
    #pragma omp parallel for //schedule(guided)
    for(std::size_t i = 0 ; i < ptcl.size() ; ++ i){
      ptcl[i].ngb.clear();
      std::stack<const node_t*> stack;
      stack.push(node[0].getPointer());
      while(stack.empty() == false){
        const node_t* const cell = stack.top(); stack.pop();
        if(isOverwrapping(ptcl[i], kernel, *cell, boundary) == true){
          if((*cell).isLeaf(N_LEAF) == true){
            // Leaf: test each contained particle against the pairwise
            // (symmetrized, max-h) kernel support.
            for(std::size_t j = (*cell).head_ptcl ; j < (*cell).head_ptcl + (*cell).Nptcl ; ++ j){
              if(abs(boundary.Periodic(ptcl[i].r - ptcl_ptr[j].ptr->r)) < kernel.width * std::max(ptcl[i].h, ptcl_ptr[j].ptr->h)) ptcl[i].ngb.push_back(ptcl_ptr[j].ptr);
              //ptcl[i].ngb.push_back(ptcl_ptr[j].ptr);
            }
          }else{
            // Internal: descend into every child.
            for(std::size_t b = (*cell).more ; b < (*cell).more + (*cell).Nbranch ; ++ b) stack.push(node[b].getPointer());
          }
        }
      }
    }
  }
  // get gravity (defined elsewhere)
  void setSelfGravity(std::vector<TreePtcl >& ptcl);
  // Dump every cell's centre of mass and radii, one line per cell.
  void Dump(char const * const filename) const{
    FILE* fp = fopen(filename, "w");
    for(std::size_t i = 0 ; i < node.size() ; ++ i){
      fprintf(fp, "%e\t%e\t%e\t%e\t%e\n", node[i].mass_center.x, node[i].mass_center.y, node[i].mass_center.z, node[i].radius, node[i].dmax);
    }
    fclose(fp);
  }
};
|
omp_master_3.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include "omp_testsuite.h"
int test_omp_master_3()
{
  /* How many threads executed the master region (must end up 1). */
  int nthreads = 0;
  /* Thread id observed inside the master region (must end up 0). */
  int executing_thread = -1;
  /* Number of non-zero thread ids seen inside master (must stay 0). */
  int tid_result = 0;
  #pragma omp parallel
  {
    #pragma omp master
    {
      int tid = omp_get_thread_num();
      if (tid != 0) {
        #pragma omp critical
        { tid_result++; }
      }
      #pragma omp critical
      { nthreads++; }
      executing_thread = tid;
    } /* end of master */
  } /* end of parallel */
  return ((nthreads == 1) && (executing_thread == 0) && (tid_result == 0));
}
int main()
{
  int i;
  int failures = 0;
  /* Run the check REPETITIONS times; every failed run adds one. */
  for (i = 0; i < REPETITIONS; i++) {
    failures += test_omp_master_3() ? 0 : 1;
  }
  return failures;
}
|
ast-dump-openmp-target-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { // one loop; CHECK block below expects implicit firstprivate(x)
#pragma omp target simd
  for (int i = 0; i < x; i++)
    ;
}
void test_two(int x, int y) { // nested loops, default collapse: inner loop is the structured block
#pragma omp target simd
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_three(int x, int y) { // collapse(1): same association as the default
#pragma omp target simd collapse(1)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_four(int x, int y) { // collapse(2): both loops associated with the directive
#pragma omp target simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_five(int x, int y, int z) { // collapse(2) with a third, non-associated inner loop
#pragma omp target simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      for (int i = 0; i < z; i++)
        ;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPTargetSimdDirective {{.*}} <line:4:1, col:24>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPTargetSimdDirective {{.*}} <line:10:1, col:24>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPTargetSimdDirective {{.*}} <line:17:1, col:36>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:25, col:35>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:34> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:34> 'int' 1
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPTargetSimdDirective {{.*}} <line:24:1, col:36>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:25, col:35>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:34> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:34> 'int' 2
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPTargetSimdDirective {{.*}} <line:31:1, col:36>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:25, col:35>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:34> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:34> 'int' 2
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
decorate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE CCCC OOO RRRR AAA TTTTT EEEEE %
% D D E C O O R R A A T E %
% D D EEE C O O RRRR AAAAA T EEE %
% D D E C O O R R A A T E %
% DDDD EEEEE CCCC OOO R R A A T EEEEE %
% %
% %
% MagickCore Image Decoration Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache-view.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/decorate.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
/*
Define declarations.
*/
#define AccentuateModulate ScaleCharToQuantum(80)
#define HighlightModulate ScaleCharToQuantum(125)
#define ShadowModulate ScaleCharToQuantum(135)
#define DepthModulate ScaleCharToQuantum(185)
#define TroughModulate ScaleCharToQuantum(110)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B o r d e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BorderImage() surrounds the image with a border of the color defined by
% the bordercolor member of the image structure. The width and height
% of the border are defined by the corresponding members of the border_info
% structure.
%
% The format of the BorderImage method is:
%
% Image *BorderImage(const Image *image,const RectangleInfo *border_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o border_info: Define the width and height of the border.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BorderImage(const Image *image,
  const RectangleInfo *border_info,ExceptionInfo *exception)
{
  FrameInfo
    frame_info;

  Image
    *bordered_image,
    *canvas_image;

  /*
    Validate arguments.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(border_info != (RectangleInfo *) NULL);
  /*
    A border is simply a frame with no bevels: the framed canvas is wider
    by the border width on each side and taller by the border height on
    each side, with the image offset to the border's top-left corner.
  */
  frame_info.x=(ssize_t) border_info->width;
  frame_info.y=(ssize_t) border_info->height;
  frame_info.width=image->columns+(border_info->width << 1);
  frame_info.height=image->rows+(border_info->height << 1);
  frame_info.inner_bevel=0;
  frame_info.outer_bevel=0;
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    FrameImage() paints the frame using the matte color, so temporarily
    substitute the image's border color for the duration of the call.
  */
  canvas_image->matte_color=image->border_color;
  bordered_image=FrameImage(canvas_image,&frame_info,exception);
  canvas_image=DestroyImage(canvas_image);
  if (bordered_image != (Image *) NULL)
    bordered_image->matte_color=image->matte_color;
  return(bordered_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F r a m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FrameImage() adds a simulated three-dimensional border around the image.
% The color of the border is defined by the matte_color member of image.
% Members width and height of frame_info specify the border width of the
% vertical and horizontal sides of the frame. Members inner and outer
% indicate the width of the inner and outer shadows of the frame.
%
% The format of the FrameImage method is:
%
% Image *FrameImage(const Image *image,const FrameInfo *frame_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o frame_info: Define the width and height of the frame and its bevels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FrameImage(const Image *image,const FrameInfo *frame_info,
  ExceptionInfo *exception)
{
#define FrameImageTag  "Frame/Image"

  CacheView
    *image_view,
    *frame_view;

  Image
    *frame_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  /*
    The frame is painted with the matte color; the bevels are lighter
    (accentuate/highlight) or darker (shadow/trough) modulations of it, and
    the cavity behind the image is filled with the border color (interior).
  */
  MagickPixelPacket
    accentuate,
    border,
    highlight,
    interior,
    matte,
    shadow,
    trough;

  register ssize_t
    x;

  size_t
    bevel_width,
    height,
    width;

  ssize_t
    y;

  /*
    Check frame geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(frame_info != (FrameInfo *) NULL);
  if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0))
    ThrowImageException(OptionError,"FrameIsLessThanImageSize");
  bevel_width=(size_t) (frame_info->outer_bevel+frame_info->inner_bevel);
  /*
    NOTE(review): width/height are size_t, so if frame_info->width is less
    than frame_info->x+bevel_width the subtraction wraps to a huge value and
    the size check below cannot fail -- confirm callers always supply a
    frame at least as large as the image plus bevels.
  */
  width=frame_info->width-frame_info->x-bevel_width;
  height=frame_info->height-frame_info->y-bevel_width;
  if ((width < image->columns) || (height < image->rows))
    ThrowImageException(OptionError,"FrameIsLessThanImageSize");
  /*
    Initialize framed image attributes.
  */
  frame_image=CloneImage(image,frame_info->width,frame_info->height,MagickTrue,
    exception);
  if (frame_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(frame_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&frame_image->exception);
      frame_image=DestroyImage(frame_image);
      return((Image *) NULL);
    }
  /*
    A translucent matte color means the frame itself carries transparency.
  */
  if (frame_image->matte_color.opacity != OpaqueOpacity)
    frame_image->matte=MagickTrue;
  frame_image->page=image->page;
  if ((image->page.width != 0) && (image->page.height != 0))
    {
      frame_image->page.width+=frame_image->columns-image->columns;
      frame_image->page.height+=frame_image->rows-image->rows;
    }
  /*
    Initialize 3D effects colors.
  */
  GetMagickPixelPacket(frame_image,&interior);
  SetMagickPixelPacket(frame_image,&image->border_color,(IndexPacket *) NULL,
    &interior);
  GetMagickPixelPacket(frame_image,&matte);
  matte.colorspace=RGBColorspace;
  SetMagickPixelPacket(frame_image,&image->matte_color,(IndexPacket *) NULL,
    &matte);
  GetMagickPixelPacket(frame_image,&border);
  border.colorspace=RGBColorspace;
  SetMagickPixelPacket(frame_image,&image->border_color,(IndexPacket *) NULL,
    &border);
  /*
    accentuate/highlight blend the matte color toward white by their
    modulation factors; shadow/trough scale it toward black.
  */
  GetMagickPixelPacket(frame_image,&accentuate);
  accentuate.red=(MagickRealType) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.red+(QuantumRange*AccentuateModulate)));
  accentuate.green=(MagickRealType) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.green+(QuantumRange*AccentuateModulate)));
  accentuate.blue=(MagickRealType) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.blue+(QuantumRange*AccentuateModulate)));
  accentuate.opacity=matte.opacity;
  GetMagickPixelPacket(frame_image,&highlight);
  highlight.red=(MagickRealType) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.red+(QuantumRange*HighlightModulate)));
  highlight.green=(MagickRealType) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.green+(QuantumRange*HighlightModulate)));
  highlight.blue=(MagickRealType) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.blue+(QuantumRange*HighlightModulate)));
  highlight.opacity=matte.opacity;
  GetMagickPixelPacket(frame_image,&shadow);
  shadow.red=QuantumScale*matte.red*ShadowModulate;
  shadow.green=QuantumScale*matte.green*ShadowModulate;
  shadow.blue=QuantumScale*matte.blue*ShadowModulate;
  shadow.opacity=matte.opacity;
  GetMagickPixelPacket(frame_image,&trough);
  trough.red=QuantumScale*matte.red*TroughModulate;
  trough.green=QuantumScale*matte.green*TroughModulate;
  trough.blue=QuantumScale*matte.blue*TroughModulate;
  trough.opacity=matte.opacity;
  if (image->colorspace == CMYKColorspace)
    {
      ConvertRGBToCMYK(&interior);
      ConvertRGBToCMYK(&matte);
      ConvertRGBToCMYK(&border);
      ConvertRGBToCMYK(&accentuate);
      ConvertRGBToCMYK(&highlight);
      ConvertRGBToCMYK(&shadow);
      ConvertRGBToCMYK(&trough);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  frame_view=AcquireCacheView(frame_image);
  /*
    Height of the top band: outer bevel rows, flat frame rows above the
    image, and inner bevel rows.
  */
  height=(size_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
    frame_info->inner_bevel);
  if (height != 0)
    {
      register IndexPacket
        *restrict frame_indexes;

      register ssize_t
        x;

      register PixelPacket
        *restrict q;

      /*
        Acquire the whole top band in one request; q then walks it pixel by
        pixel across the three row groups below.
      */
      q=QueueCacheViewAuthenticPixels(frame_view,0,0,frame_image->columns,
        height,exception);
      /*
        NOTE(review): the index queue is fetched before q is checked for
        NULL -- presumably harmless since it is only dereferenced when
        q != NULL, but confirm.
      */
      frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
      if (q != (PixelPacket *) NULL)
        {
          /*
            Draw top of ornamental border.
          */
          for (y=0; y < (ssize_t) frame_info->outer_bevel; y++)
          {
            /*
              Outer bevel rows: highlight wedge on the left (x < y),
              accentuate across the top, shadow wedge on the right.
            */
            for (x=0; x < (ssize_t) (frame_image->columns-y); x++)
            {
              if (x < y)
                SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              else
                SetPixelPacket(frame_image,&accentuate,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for ( ; x < (ssize_t) frame_image->columns; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          for (y=0; y < (ssize_t) (frame_info->y-bevel_width); y++)
          {
            /*
              Flat frame rows: highlight edge, matte fill, shadow edge.
            */
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            width=frame_image->columns-2*frame_info->outer_bevel;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          for (y=0; y < (ssize_t) frame_info->inner_bevel; y++)
          {
            /*
              Inner bevel rows above the image cavity: shadow/trough wedge
              over the cavity span, highlight on its right edge.
            */
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            width=image->columns+((size_t) frame_info->inner_bevel << 1)-
              y;
            for (x=0; x < (ssize_t) width; x++)
            {
              if (x < y)
                SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              else
                SetPixelPacket(frame_image,&trough,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            width=frame_info->width-frame_info->x-image->columns-bevel_width;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          (void) SyncCacheViewAuthenticPixels(frame_view,exception);
        }
    }
  /*
    Draw sides of ornamental border: one scanline of the framed image per
    iteration, parallelized per row.  (omp_throttle is a non-standard
    clause -- presumably an ImageMagick OpenMP extension; confirm the build
    accepts it.)
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict frame_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    /*
      Initialize scanline with matte color.
    */
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(frame_view,0,frame_info->y+y,
      frame_image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
    for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
    {
      SetPixelPacket(frame_image,&highlight,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
    {
      SetPixelPacket(frame_image,&matte,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
    {
      SetPixelPacket(frame_image,&shadow,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    /*
      Set frame interior to interior color -- unless the final composite
      (see end of function) would simply overwrite it, in which case the
      source scanline is copied in directly here.
    */
    if ((image->compose != CopyCompositeOp) &&
        ((image->compose != OverCompositeOp) || (image->matte != MagickFalse)))
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelPacket(frame_image,&interior,q,frame_indexes);
        q++;
        frame_indexes++;
      }
    else
      {
        register const IndexPacket
          *indexes;

        register const PixelPacket
          *p;

        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewVirtualIndexQueue(image_view);
        (void) CopyMagickMemory(q,p,image->columns*sizeof(*p));
        if ((image->colorspace == CMYKColorspace) &&
            (frame_image->colorspace == CMYKColorspace))
          {
            /*
              frame_indexes is only advanced in the CMYK case: that is the
              only branch that writes the index channel here.
            */
            (void) CopyMagickMemory(frame_indexes,indexes,image->columns*
              sizeof(*indexes));
            frame_indexes+=image->columns;
          }
        q+=image->columns;
      }
    for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
    {
      SetPixelPacket(frame_image,&highlight,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    width=frame_info->width-frame_info->x-image->columns-bevel_width;
    for (x=0; x < (ssize_t) width; x++)
    {
      SetPixelPacket(frame_image,&matte,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
    {
      SetPixelPacket(frame_image,&shadow,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    if (SyncCacheViewAuthenticPixels(frame_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          progress is shared across threads; the named critical section
          serializes the increment and the callback.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_FrameImage)
#endif
        proceed=SetImageProgress(image,FrameImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Height of the bottom band: inner bevel rows, flat frame rows below the
    image, and outer bevel rows.
  */
  height=(size_t) (frame_info->inner_bevel+frame_info->height-
    frame_info->y-image->rows-bevel_width+frame_info->outer_bevel);
  if (height != 0)
    {
      register IndexPacket
        *restrict frame_indexes;

      register ssize_t
        x;

      register PixelPacket
        *restrict q;

      /*
        Draw bottom of ornamental border.
      */
      q=QueueCacheViewAuthenticPixels(frame_view,0,(ssize_t) (frame_image->rows-
        height),frame_image->columns,height,exception);
      if (q != (PixelPacket *) NULL)
        {
          /*
            Inner bevel rows below the image cavity (y counts down so the
            wedges taper symmetrically with the top band).
          */
          frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
          for (y=frame_info->inner_bevel-1; y >= 0; y--)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < y; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
            {
              if (x >= (ssize_t) (image->columns+2*frame_info->inner_bevel-y))
                SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              else
                SetPixelPacket(frame_image,&accentuate,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            width=frame_info->width-frame_info->x-image->columns-bevel_width;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          /*
            Flat frame rows below the cavity.
          */
          height=frame_info->height-frame_info->y-image->rows-bevel_width;
          for (y=0; y < (ssize_t) height; y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            width=frame_image->columns-2*frame_info->outer_bevel;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          /*
            Outer bevel rows along the bottom edge.
          */
          for (y=frame_info->outer_bevel-1; y >= 0; y--)
          {
            for (x=0; x < y; x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for ( ; x < (ssize_t) frame_image->columns; x++)
            {
              if (x >= (ssize_t) (frame_image->columns-y))
                SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              else
                SetPixelPacket(frame_image,&trough,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          (void) SyncCacheViewAuthenticPixels(frame_view,exception);
        }
    }
  frame_view=DestroyCacheView(frame_view);
  image_view=DestroyCacheView(image_view);
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) || (image->matte != MagickFalse)))
    {
      /*
        Composite the source image into the frame cavity (the direct-copy
        fast path above already handled the other compose modes).
      */
      x=(ssize_t) (frame_info->outer_bevel+(frame_info->x-bevel_width)+
        frame_info->inner_bevel);
      y=(ssize_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
        frame_info->inner_bevel);
      (void) CompositeImage(frame_image,image->compose,image,x,y);
    }
  return(frame_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RaiseImage() creates a simulated three-dimensional button-like effect
% by lightening and darkening the edges of the image. Members width and
% height of raise_info define the width of the vertical and horizontal
% edge of the effect.
%
% The format of the RaiseImage method is:
%
% MagickBooleanType RaiseImage(Image *image,
% const RectangleInfo *raise_info,const MagickBooleanType raise)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o raise_info: Define the width and height of the raise area.
%
% o raise: A value other than zero creates a 3-D raise effect,
% otherwise it has a lowered effect.
%
*/
MagickExport MagickBooleanType RaiseImage(Image *image,
  const RectangleInfo *raise_info,const MagickBooleanType raise)
{
#define AccentuateFactor  ScaleCharToQuantum(135)
#define HighlightFactor  ScaleCharToQuantum(190)
#define ShadowFactor  ScaleCharToQuantum(190)
#define RaiseImageTag  "Raise/Image"
#define TroughFactor  ScaleCharToQuantum(135)

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    foreground,
    background;

  ssize_t
    y;

  /*
    RaiseImage() blends the image edges toward foreground (light) or
    background (dark) to simulate a raised (raise != MagickFalse) or sunken
    button.  The image is modified in place; raise_info->width/height give
    the bevel thickness.  Returns MagickTrue on success.

    FIX: the top-edge and middle-row parallel loops below previously updated
    the shared progress counter without the critical section used by the
    bottom-edge loop, a data race under MAGICKCORE_OPENMP_SUPPORT; all three
    loops now serialize SetImageProgress() identically (same pattern as
    FrameImage).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(raise_info != (RectangleInfo *) NULL);
  if ((image->columns <= (raise_info->width << 1)) ||
      (image->rows <= (raise_info->height << 1)))
    ThrowBinaryException(OptionError,"ImageSizeMustExceedBevelWidth",
      image->filename);
  /*
    Raised: blend edges toward white on top/left, black on bottom/right;
    lowered: the reverse.
  */
  foreground=(Quantum) QuantumRange;
  background=(Quantum) 0;
  if (raise == MagickFalse)
    {
      foreground=(Quantum) 0;
      background=(Quantum) QuantumRange;
    }
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Raise image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=0; y < (ssize_t) raise_info->height; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Top band: left wedge (x < y) gets the highlight, the top edge the
      stronger accentuate, and the right wedge the shadow.
    */
    for (x=0; x < y; x++)
    {
      q->red=ClampToQuantum(QuantumScale*((MagickRealType) q->red*
        HighlightFactor+(MagickRealType) foreground*(QuantumRange-
        HighlightFactor)));
      q->green=ClampToQuantum(QuantumScale*((MagickRealType) q->green*
        HighlightFactor+(MagickRealType) foreground*(QuantumRange-
        HighlightFactor)));
      q->blue=ClampToQuantum(QuantumScale*((MagickRealType) q->blue*
        HighlightFactor+(MagickRealType) foreground*(QuantumRange-
        HighlightFactor)));
      q++;
    }
    for ( ; x < (ssize_t) (image->columns-y); x++)
    {
      q->red=ClampToQuantum(QuantumScale*((MagickRealType) q->red*
        AccentuateFactor+(MagickRealType) foreground*(QuantumRange-
        AccentuateFactor)));
      q->green=ClampToQuantum(QuantumScale*((MagickRealType) q->green*
        AccentuateFactor+(MagickRealType) foreground*(QuantumRange-
        AccentuateFactor)));
      q->blue=ClampToQuantum(QuantumScale*((MagickRealType) q->blue*
        AccentuateFactor+(MagickRealType) foreground*(QuantumRange-
        AccentuateFactor)));
      q++;
    }
    for ( ; x < (ssize_t) image->columns; x++)
    {
      q->red=ClampToQuantum(QuantumScale*((MagickRealType) q->red*ShadowFactor+
        (MagickRealType) background*(QuantumRange-ShadowFactor)));
      q->green=ClampToQuantum(QuantumScale*((MagickRealType) q->green*
        ShadowFactor+(MagickRealType) background*(QuantumRange-ShadowFactor)));
      q->blue=ClampToQuantum(QuantumScale*((MagickRealType) q->blue*
        ShadowFactor+(MagickRealType) background*(QuantumRange-ShadowFactor)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          Serialize the shared progress counter and callback (was missing
          here; see function header note).
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=(ssize_t) raise_info->height; y < (ssize_t) (image->rows-raise_info->height); y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Middle band: highlight the left bevel, leave the interior untouched,
      shadow the right bevel.
    */
    for (x=0; x < (ssize_t) raise_info->width; x++)
    {
      q->red=ClampToQuantum(QuantumScale*((MagickRealType) q->red*
        HighlightFactor+(MagickRealType) foreground*(QuantumRange-
        HighlightFactor)));
      q->green=ClampToQuantum(QuantumScale*((MagickRealType) q->green*
        HighlightFactor+(MagickRealType) foreground*(QuantumRange-
        HighlightFactor)));
      q->blue=ClampToQuantum(QuantumScale*((MagickRealType) q->blue*
        HighlightFactor+(MagickRealType) foreground*(QuantumRange-
        HighlightFactor)));
      q++;
    }
    for ( ; x < (ssize_t) (image->columns-raise_info->width); x++)
      q++;
    for ( ; x < (ssize_t) image->columns; x++)
    {
      q->red=ClampToQuantum(QuantumScale*((MagickRealType) q->red*ShadowFactor+
        (MagickRealType) background*(QuantumRange-ShadowFactor)));
      q->green=ClampToQuantum(QuantumScale*((MagickRealType) q->green*
        ShadowFactor+(MagickRealType) background*(QuantumRange-ShadowFactor)));
      q->blue=ClampToQuantum(QuantumScale*((MagickRealType) q->blue*
        ShadowFactor+(MagickRealType) background*(QuantumRange-ShadowFactor)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          Serialize the shared progress counter and callback (was missing
          here; see function header note).
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=(ssize_t) (image->rows-raise_info->height); y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Bottom band: left wedge keeps the highlight, the bottom edge gets the
      trough, and the right wedge the shadow.
    */
    for (x=0; x < (ssize_t) (image->rows-y); x++)
    {
      q->red=ClampToQuantum(QuantumScale*((MagickRealType) q->red*
        HighlightFactor+(MagickRealType) foreground*(QuantumRange-
        HighlightFactor)));
      q->green=ClampToQuantum(QuantumScale*((MagickRealType) q->green*
        HighlightFactor+(MagickRealType) foreground*(QuantumRange-
        HighlightFactor)));
      q->blue=ClampToQuantum(QuantumScale*((MagickRealType) q->blue*
        HighlightFactor+(MagickRealType) foreground*(QuantumRange-
        HighlightFactor)));
      q++;
    }
    for ( ; x < (ssize_t) (image->columns-(image->rows-y)); x++)
    {
      q->red=ClampToQuantum(QuantumScale*((MagickRealType) q->red*TroughFactor+
        (MagickRealType) background*(QuantumRange-TroughFactor)));
      q->green=ClampToQuantum(QuantumScale*((MagickRealType) q->green*
        TroughFactor+(MagickRealType) background*(QuantumRange-TroughFactor)));
      q->blue=ClampToQuantum(QuantumScale*((MagickRealType) q->blue*
        TroughFactor+(MagickRealType) background*(QuantumRange-TroughFactor)));
      q++;
    }
    for ( ; x < (ssize_t) image->columns; x++)
    {
      q->red=ClampToQuantum(QuantumScale*((MagickRealType) q->red*ShadowFactor+
        (MagickRealType) background*(QuantumRange-ShadowFactor)));
      q->green=ClampToQuantum(QuantumScale*((MagickRealType) q->green*
        ShadowFactor+(MagickRealType) background*(QuantumRange-ShadowFactor)));
      q->blue=ClampToQuantum(QuantumScale*((MagickRealType) q->blue*
        ShadowFactor+(MagickRealType) background*(QuantumRange-ShadowFactor)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
GB_binop__ge_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__ge_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__ge_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_int64)
// A*D function (colscale): GB (_AxD__ge_int64)
// D*A function (rowscale): GB (_DxB__ge_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_int64)
// C=scalar+B GB (_bind1st__ge_int64)
// C=scalar+B' GB (_bind1st_tran__ge_int64)
// C=A+scalar GB (_bind2nd__ge_int64)
// C=A'+scalar GB (_bind2nd_tran__ge_int64)
// C type: bool
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_INT64 || GxB_NO_GE_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the whole loop body comes from
// the shared template, specialized by the GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__ge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating a sparse matrix into a dense matrix.  The generator
// emitted "#if 0" around the template here -- presumably because GE (bool
// result, int64 inputs) cannot act as an accumulator; this variant is
// therefore a no-op that reports success.
GrB_Info GB (_Cdense_accumB__ge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a scalar into a dense matrix.  Disabled by the
// generator via "#if 0" (same reason as the accumB variant above); the
// function is a no-op that reports success.
GrB_Info GB (_Cdense_accumb__ge_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale by the diagonal matrix D:
// C(i,j) = (A(i,j) >= D(j,j)).  The template reads Cx plus the GB_* macros.
GrB_Info GB (_AxD__ge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale by the diagonal matrix D:
// C(i,j) = (D(i,i) >= B(i,j)).  The template reads Cx plus the GB_* macros.
GrB_Info GB (_DxB__ge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C = A+B (optionally masked by M or !M) with the
// GE operator applied where entries overlap.
GrB_Info GB (_AaddB__ge_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are the eWiseUnion fill scalars; they are left
    // uninitialized for plain eWiseAdd -- presumably the template only
    // reads them when is_eWiseUnion is true (confirm in GB_add_template.c).
    int64_t alpha_scalar ;
    int64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked) where C is
// sparse/hypersparse; the loop body comes from GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__ge_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for this operator, so only the
// non-flipped branch below is compiled.
GrB_Info GB (_AemultB_02__ge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper while A and B are
// both bitmap/full; the loop body comes from GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__ge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult when the result C is bitmap: C = A.*B, C<M> = A.*B, or
// C<!M> = A.*B; the loop body comes from GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__ge_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply the GE operator with the scalar bound to the first operand:
// Cx [p] = (x >= Bx [p]) for every entry present per the bitmap Bb.
GrB_Info GB (_bind1st__ge_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    const int64_t scalar = (*((int64_t *) x_input)) ;
    const int64_t *source = (int64_t *) Bx_input ;
    bool *result = (bool *) Cx_output ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Bb, k))
        {
            result [k] = (scalar >= GBX (source, k, false)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply the GE operator with the scalar bound to the second operand:
// Cx [p] = (Ax [p] >= y) for every entry present per the bitmap Ab.
GrB_Info GB (_bind2nd__ge_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *result = (bool *) Cx_output ;
    const int64_t *source = (int64_t *) Ax_input ;
    const int64_t scalar = (*((int64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Ab, k))
        {
            result [k] = (GBX (source, k, false) >= scalar) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the shared transpose template below; it is
// redefined here so the template computes x >= aij for each entry of A.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
// C = op (x, A'): transpose A and apply GE_INT64 with x bound first.
GrB_Info GB (_bind1st_tran__ge_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent kernels in this generated file
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the shared transpose template below; it is
// redefined here so the template computes aij >= y for each entry of A.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
// C = op (A', y): transpose A and apply GE_INT64 with y bound second.
GrB_Info GB (_bind2nd_tran__ge_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
SparseDenseProduct.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEDENSEPRODUCT_H
#define EIGEN_SPARSEDENSEPRODUCT_H
namespace Eigen {
namespace internal {
// A sparse-by-dense (or dense-by-sparse) outer product yields a sparse result.
template <> struct product_promote_storage_type<Sparse,Dense, OuterProduct> { typedef Sparse ret; };
template <> struct product_promote_storage_type<Dense,Sparse, OuterProduct> { typedef Sparse ret; };
// Primary template for sparse * dense products.  Dispatch is on the sparse
// lhs storage order, and on whether the dense rhs can be processed one
// column at a time (column-major rhs, or a compile-time vector).
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,
typename AlphaType,
int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
struct sparse_time_dense_product_impl;
// Row-major sparse lhs, processed column-by-column of the rhs:
// res(i,c) += alpha * dot(lhs.row(i), rhs.col(c)).  Rows are independent,
// so the row loop can be parallelized with OpenMP.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, true>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
typedef evaluator<Lhs> LhsEval;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
LhsEval lhsEval(lhs);
Index n = lhs.outerSize();
#ifdef EIGEN_HAS_OPENMP
Eigen::initParallel();
Index threads = Eigen::nbThreads();
#endif
for(Index c=0; c<rhs.cols(); ++c)
{
#ifdef EIGEN_HAS_OPENMP
// This 20000 threshold has been found experimentally on 2D and 3D Poisson problems.
// It basically represents the minimal amount of work to be done to be worth it.
if(threads>1 && lhsEval.nonZerosEstimate() > 20000)
{
#pragma omp parallel for schedule(static) num_threads(threads)
for(Index i=0; i<n; ++i)
processRow(lhsEval,rhs,res,alpha,i,c);
}
else
#endif
{
// sequential fallback: too little work, or OpenMP unavailable
for(Index i=0; i<n; ++i)
processRow(lhsEval,rhs,res,alpha,i,c);
}
}
}
// res(i,col) += alpha * dot(lhs.row(i), rhs.col(col)), accumulated in a
// local temporary to avoid repeated writes to res.
static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, Index i, Index col)
{
typename Res::Scalar tmp(0);
for(LhsInnerIterator it(lhsEval,i); it ;++it)
tmp += it.value() * rhs.coeff(it.index(),col);
res.coeffRef(i,col) += alpha * tmp;
}
};
// FIXME: what is the purpose of the following specialization? Is it for the BlockedSparse format?
// Declares the result type of multiplying a scalar T1 by a Ref<T2>, used by
// the ColMajor col-per-col kernel below when AlphaType is not Res::Scalar.
template<typename T1, typename T2/*, int _Options, typename _StrideType*/>
struct scalar_product_traits<T1, Ref<T2/*, _Options, _StrideType*/> >
{
enum {
Defined = 1
};
typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType;
};
// Column-major sparse lhs, processed column-by-column of the rhs:
// for each rhs coefficient rhs(j,c), scatter alpha*rhs(j,c) times the
// j-th sparse column of lhs into res.col(c).
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType, ColMajor, true>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
evaluator<Lhs> lhsEval(lhs);
for(Index c=0; c<rhs.cols(); ++c)
{
for(Index j=0; j<lhs.outerSize(); ++j)
{
// typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
// scalar_product_traits resolves the product type when alpha is
// not a plain scalar (see the Ref specialization above)
typename internal::scalar_product_traits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c));
for(LhsInnerIterator it(lhsEval,j); it ;++it)
res.coeffRef(it.index(),c) += it.value() * rhs_j;
}
}
}
};
// Row-major sparse lhs against a whole (row-major) rhs at once:
// res.row(j) += alpha * sum over the j-th sparse row of lhs of
// lhs(j,k) * rhs.row(k).
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, false>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
evaluator<Lhs> lhsEval(lhs);
for(Index j=0; j<lhs.outerSize(); ++j)
{
typename Res::RowXpr res_j(res.row(j));
for(LhsInnerIterator it(lhsEval,j); it ;++it)
res_j += (alpha*it.value()) * rhs.row(it.index());
}
}
};
// Column-major sparse lhs against a whole rhs at once: each sparse entry
// lhs(i,j) contributes (alpha*lhs(i,j)) * rhs.row(j) to res.row(i).
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, ColMajor, false>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
evaluator<Lhs> lhsEval(lhs);
for(Index j=0; j<lhs.outerSize(); ++j)
{
typename Rhs::ConstRowXpr rhs_j(rhs.row(j));
for(LhsInnerIterator it(lhsEval,j); it ;++it)
res.row(it.index()) += (alpha*it.value()) * rhs_j;
}
}
};
// Entry point: res += alpha * lhs * rhs, dispatching to the specialization
// selected by the lhs storage order and rhs traversal mode (see primary
// template above).
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>
inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType>::run(lhs, rhs, res, alpha);
}
} // end namespace internal
namespace internal {
// Product-evaluator glue: sparse * dense products funnel into
// sparse_time_dense_product after nesting/evaluating the operands.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,SparseShape,DenseShape,ProductType> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
{
// evaluate operands if they will be traversed more than once
typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? 1 : Rhs::ColsAtCompileTime>::type LhsNested;
typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==0) ? 1 : Dynamic>::type RhsNested;
LhsNested lhsNested(lhs);
RhsNested rhsNested(rhs);
internal::sparse_time_dense_product(lhsNested, rhsNested, dst, alpha);
}
};
// A sparse-triangular lhs is handled exactly like a general sparse lhs.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, DenseShape, ProductType>
: generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
{};
// dense * sparse is computed as (sparse' * dense')' to reuse the kernel above.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,SparseShape,ProductType> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
template<typename Dst>
static void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
{
typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? Dynamic : 1>::type LhsNested;
typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==RowMajorBit) ? 1 : Lhs::RowsAtCompileTime>::type RhsNested;
LhsNested lhsNested(lhs);
RhsNested rhsNested(rhs);
// transpose everything
Transpose<Dst> dstT(dst);
internal::sparse_time_dense_product(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
}
};
// A sparse-triangular rhs is handled exactly like a general sparse rhs.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseTriangularShape, ProductType>
: generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
{};
// Lazy evaluator for a sparse-vector (x) dense-vector outer product.  Each
// "outer" index selects one coefficient of the dense factor; iterating an
// inner slice walks the sparse factor scaled by that coefficient.
template<typename LhsT, typename RhsT, bool NeedToTranspose>
struct sparse_dense_outer_product_evaluator
{
protected:
// when NeedToTranspose, the roles of the two factors are swapped
typedef typename conditional<NeedToTranspose,RhsT,LhsT>::type Lhs1;
typedef typename conditional<NeedToTranspose,LhsT,RhsT>::type ActualRhs;
typedef Product<LhsT,RhsT,DefaultProduct> ProdXprType;
// if the actual left-hand side is a dense vector,
// then build a sparse-view so that we can seamlessly iterate over it.
typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
Lhs1, SparseView<Lhs1> >::type ActualLhs;
typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
Lhs1 const&, SparseView<Lhs1> >::type LhsArg;
typedef evaluator<ActualLhs> LhsEval;
typedef evaluator<ActualRhs> RhsEval;
typedef typename evaluator<ActualLhs>::InnerIterator LhsIterator;
typedef typename ProdXprType::Scalar Scalar;
public:
enum {
Flags = NeedToTranspose ? RowMajorBit : 0,
CoeffReadCost = HugeCost
};
// Iterates the sparse factor, scaling each value by the dense factor's
// coefficient at `outer` (m_factor).  m_empty flags a missing sparse
// rhs coefficient so the whole slice reads as empty.
class InnerIterator : public LhsIterator
{
public:
InnerIterator(const sparse_dense_outer_product_evaluator &xprEval, Index outer)
: LhsIterator(xprEval.m_lhsXprImpl, 0),
m_outer(outer),
m_empty(false),
m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits<ActualRhs>::StorageKind() ))
{}
EIGEN_STRONG_INLINE Index outer() const { return m_outer; }
EIGEN_STRONG_INLINE Index row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); }
EIGEN_STRONG_INLINE Index col() const { return NeedToTranspose ? LhsIterator::index() : m_outer; }
EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; }
EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); }
protected:
// dense rhs: coefficient is always available
Scalar get(const RhsEval &rhs, Index outer, Dense = Dense()) const
{
return rhs.coeff(outer);
}
// sparse rhs: coefficient may be structurally absent
Scalar get(const RhsEval &rhs, Index outer, Sparse = Sparse())
{
typename RhsEval::InnerIterator it(rhs, outer);
if (it && it.index()==0 && it.value()!=Scalar(0))
return it.value();
m_empty = true;
return Scalar(0);
}
Index m_outer;
bool m_empty;
Scalar m_factor;
};
sparse_dense_outer_product_evaluator(const Lhs1 &lhs, const ActualRhs &rhs)
: m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
// transpose case
sparse_dense_outer_product_evaluator(const ActualRhs &rhs, const Lhs1 &lhs)
: m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
protected:
const LhsArg m_lhs;
evaluator<ActualLhs> m_lhsXprImpl;
evaluator<ActualRhs> m_rhsXprImpl;
};
// sparse * dense outer product
// Hooks the lazy evaluator above into the product-evaluator mechanism; the
// transpose flag is the storage order of the sparse operand.
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, SparseShape, DenseShape>
: sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor>
{
typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor> Base;
typedef Product<Lhs, Rhs> XprType;
typedef typename XprType::PlainObject PlainObject;
explicit product_evaluator(const XprType& xpr)
: Base(xpr.lhs(), xpr.rhs())
{}
};
// dense * sparse outer product: same evaluator, transpose flag from the rhs.
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, DenseShape, SparseShape>
: sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor>
{
typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor> Base;
typedef Product<Lhs, Rhs> XprType;
typedef typename XprType::PlainObject PlainObject;
explicit product_evaluator(const XprType& xpr)
: Base(xpr.lhs(), xpr.rhs())
{}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSEDENSEPRODUCT_H
|
GB_unaryop__abs_uint8_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint8_uint32
// op(A') function: GB_tran__abs_uint8_uint32
// C type: uint8_t
// A type: uint32_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
// Type and operator plumbing consumed by the kernels below and by the
// shared transpose template (GB_unaryop_transpose.c).
// input (A) entry type
#define GB_ATYPE \
uint32_t
// output (C) entry type
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
// ABS on an unsigned input is the identity, so the op is a plain copy
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT8 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (uint8_t) Ax [p] for all anz entries.  ABS is the identity for
// unsigned inputs, so only the uint32 -> uint8 typecast remains (this is
// the expansion of GB_CAST_OP for this operator/type pair).
GrB_Info GB_unop__abs_uint8_uint32
(
uint8_t *Cx, // Cx and Ax may be aliased
uint32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
uint32_t a_k = Ax [k] ;
Cx [k] = (uint8_t) a_k ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (uint8_t) abs (A'): transpose A while typecasting uint32 entries to
// uint8.  The loop lives in the shared template GB_unaryop_transpose.c,
// specialized via the GB_* macros defined above.
GrB_Info GB_tran__abs_uint8_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// this operator/type combination was compiled out
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two struct timeval values.
 * Note: *y is used as scratch space and may be modified by the call.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
if (x->tv_usec < y->tv_usec)
{
int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * borrow;
y->tv_sec += borrow;
}
/* Carry any excess beyond one second back into y's seconds field. */
if (x->tv_usec - y->tv_usec > 1000000)
{
int carry = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * carry;
y->tv_sec -= carry;
}
/* After normalization the microsecond difference is non-negative. */
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Negative iff x's (adjusted) seconds precede y's. */
return x->tv_sec < y->tv_sec;
}
// Driver: allocates two ping-pong buffers, runs the order-1 3D 7-point
// stencil Nt-1 times, and reports the per-test wall time.
// Usage: ./3d7pt [Nx Ny Nz [Nt]]
int main(int argc, char *argv[])
{
int t, i, j, k, test;
// Defaults guard against reading these uninitialized when too few
// command-line arguments are supplied (previously undefined behavior).
// Each dimension includes 2 boundary layers.
int Nx = 34, Ny = 34, Nz = 34, Nt = 100;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// Two time planes (ping-pong), each a [Nz][Ny][Nx] array of doubles.
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 4;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// Initialize BOTH planes over the FULL domain (indices 0..N-1).  The
// stencil reads the index-0 boundary layers and, from the second step
// on, the boundaries of the second plane; previously those were left
// uninitialized (loops started at 1 and A[1] was never seeded).
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = A[0][i][j][k]; // constant boundary values
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
// was `min(...)`: undeclared — the macro defined above is MIN
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
Par-13-ParForNestedParForNestedParFor.c |
// Analysis test case: nested `parallel for` regions three levels deep.
int main(int argc, char **argv) {
int a[4] = {1,2,3,4};
int b[4] = {1,1,1,1};
int c[4] = {0,2,1,3};
// NOTE(review): on the first iteration i==0, so (i < 2) holds and main
// returns -1 immediately — everything below this loop is dead code
// (presumably intentional for this test case; confirm before "fixing").
for (int i = 0; i < 1; ++i) {
if (i < 2) {
return -1;
}
}
#pragma omp parallel for
for (int i = 0; i < 4; ++i) {
a[i] = 3*a[i];
// inner regions are serialized unless nested parallelism is enabled
#pragma omp parallel for
for(int j = 0; j < 4; ++j) {
b[j] += a[i];
#pragma omp parallel for
for(int k = 0; k < 4; ++k) {
c[k] = a[i] * b[k] + c[k];
}
}
}
return 0;
}
|
particle_iter.kernel_runtime.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "local_header.h"
#include "openmp_pscmc_inc.h"
#include "particle_iter.kernel_inc.h"
// Host-side setup for the dump_ene_num kernel: nothing to initialize.
int openmp_dump_ene_num_init (openmp_pscmc_env * pe ,openmp_dump_ene_num_struct * kerstr ){
return 0 ;}
// Report the size of the kernel-argument struct to the caller.
void openmp_dump_ene_num_get_struct_len (size_t * len ){
((len)[0] = sizeof(openmp_dump_ene_num_struct ));
}
// One "compute unit" per available OpenMP thread.
int openmp_dump_ene_num_get_num_compute_units (openmp_dump_ene_num_struct * kerstr ){
return omp_get_max_threads ( ) ;}
// x-dimension width hint (IDX_OPT_MAX comes from the kernel headers).
int openmp_dump_ene_num_get_xlen (){
return IDX_OPT_MAX ;}
// Launch the dump_ene_num kernel over an x-by-y iteration space: y indices
// are distributed round-robin across OpenMP threads, x is swept serially
// inside each thread.
int openmp_dump_ene_num_exec (openmp_dump_ene_num_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int xid ;
int yid ;
int numt = omp_get_num_threads ( ) ;
int tid = omp_get_thread_num ( ) ;
// ysingle/ymin/ymax describe a blocked distribution, but the loop below
// actually uses a round-robin (stride-numt) distribution; they are unused.
int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ;
int ymin = ( tid * ysingle ) ;
int ymax = ( ( 1 + tid ) * ysingle ) ;
for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt )))
{
for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 )))
{
openmp_dump_ene_num_scmc_kernel ( ( kerstr )->inoutput , ( kerstr )->xyzw , ( kerstr )->cu_cache , ( kerstr )->cu_xyzw , ( kerstr )->fieldE , ( kerstr )->fieldB , ( kerstr )->FoutJ , ( kerstr )->FoutEN , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->grid_cache_len)[0] , ( ( kerstr )->cu_cache_length)[0] , ( ( kerstr )->Mass)[0] , ( ( kerstr )->Charge)[0] , ( ( kerstr )->SPEC)[0] , ( ( kerstr )->NUM_SPEC)[0] , ( ( kerstr )->DELTA_X)[0] , ( ( kerstr )->DELTA_Y)[0] , ( ( kerstr )->DELTA_Z)[0] , ( ( kerstr )->Deltat)[0] , yid , scmc_internal_g_ylen );
}}} return 0 ;}
// Kernel-argument binders for dump_ene_num: each stores the device buffer
// pointer of a pscmc memory object into the kernel-argument struct.
// Fix: every binder is declared `int` but previously fell off the end
// without a return statement, leaving the returned value indeterminate;
// all binders now return 0 (success).
int openmp_dump_ene_num_scmc_set_parameter_inoutput (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->inoutput = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_xyzw (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->xyzw = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_cu_cache (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->cu_cache = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_cu_xyzw (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->cu_xyzw = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_fieldE (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->fieldE = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_fieldB (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->fieldB = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_FoutJ (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->FoutJ = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_FoutEN (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->FoutEN = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_XLEN (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->XLEN = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_YLEN (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->YLEN = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_ZLEN (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->ZLEN = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_ovlp (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->ovlp = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_numvec (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->numvec = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_num_ele (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->num_ele = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_grid_cache_len (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->grid_cache_len = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_cu_cache_length (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->cu_cache_length = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_Mass (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->Mass = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_Charge (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->Charge = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_SPEC (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->SPEC = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_NUM_SPEC (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->NUM_SPEC = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_DELTA_X (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->DELTA_X = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_DELTA_Y (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->DELTA_Y = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_DELTA_Z (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->DELTA_Z = pm->d_data;
return 0;
}
int openmp_dump_ene_num_scmc_set_parameter_Deltat (openmp_dump_ene_num_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->Deltat = pm->d_data;
return 0;
}
// Host-side setup for the calculate_rho kernel: nothing to initialize.
int openmp_calculate_rho_init (openmp_pscmc_env * pe ,openmp_calculate_rho_struct * kerstr ){
return 0 ;}
// Report the size of the kernel-argument struct to the caller.
void openmp_calculate_rho_get_struct_len (size_t * len ){
((len)[0] = sizeof(openmp_calculate_rho_struct ));
}
// One "compute unit" per available OpenMP thread.
int openmp_calculate_rho_get_num_compute_units (openmp_calculate_rho_struct * kerstr ){
return omp_get_max_threads ( ) ;}
// x-dimension width hint (IDX_OPT_MAX comes from the kernel headers).
int openmp_calculate_rho_get_xlen (){
return IDX_OPT_MAX ;}
// Launch the calculate_rho kernel over an x-by-y iteration space: y indices
// are distributed round-robin across OpenMP threads, x is swept serially.
int openmp_calculate_rho_exec (openmp_calculate_rho_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int xid ;
int yid ;
int numt = omp_get_num_threads ( ) ;
int tid = omp_get_thread_num ( ) ;
// ysingle/ymin/ymax are computed for a blocked distribution but unused;
// the loop below uses a stride-numt round-robin distribution instead.
int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ;
int ymin = ( tid * ysingle ) ;
int ymax = ( ( 1 + tid ) * ysingle ) ;
for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt )))
{
for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 )))
{
openmp_calculate_rho_scmc_kernel ( ( kerstr )->inoutput , ( kerstr )->xyzw , ( kerstr )->cu_cache , ( kerstr )->cu_xyzw , ( kerstr )->fieldE , ( kerstr )->fieldB , ( kerstr )->FoutJ , ( kerstr )->FoutEN , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->grid_cache_len)[0] , ( ( kerstr )->cu_cache_length)[0] , ( ( kerstr )->Mass)[0] , ( ( kerstr )->Charge)[0] , ( ( kerstr )->SPEC)[0] , ( ( kerstr )->NUM_SPEC)[0] , ( ( kerstr )->DELTA_X)[0] , ( ( kerstr )->DELTA_Y)[0] , ( ( kerstr )->DELTA_Z)[0] , ( ( kerstr )->Deltat)[0] , yid , scmc_internal_g_ylen );
}}} return 0 ;}
// Kernel-argument binders for calculate_rho: each stores the device buffer
// pointer of a pscmc memory object into the kernel-argument struct.
// Fix: every binder is declared `int` but previously fell off the end
// without a return statement (indeterminate return value); all binders
// now return 0 (success).
int openmp_calculate_rho_scmc_set_parameter_inoutput (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->inoutput = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_xyzw (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->xyzw = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_cu_cache (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->cu_cache = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_cu_xyzw (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->cu_xyzw = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_fieldE (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->fieldE = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_fieldB (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->fieldB = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_FoutJ (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->FoutJ = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_FoutEN (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->FoutEN = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_XLEN (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->XLEN = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_YLEN (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->YLEN = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_ZLEN (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->ZLEN = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_ovlp (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->ovlp = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_numvec (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->numvec = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_num_ele (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->num_ele = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_grid_cache_len (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->grid_cache_len = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_cu_cache_length (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->cu_cache_length = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_Mass (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->Mass = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_Charge (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->Charge = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_SPEC (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->SPEC = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_NUM_SPEC (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->NUM_SPEC = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_DELTA_X (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->DELTA_X = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_DELTA_Y (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->DELTA_Y = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_DELTA_Z (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->DELTA_Z = pm->d_data;
return 0;
}
int openmp_calculate_rho_scmc_set_parameter_Deltat (openmp_calculate_rho_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->Deltat = pm->d_data;
return 0;
}
// Host-side setup for the krook_collision_test kernel: nothing to initialize.
int openmp_krook_collision_test_init (openmp_pscmc_env * pe ,openmp_krook_collision_test_struct * kerstr ){
return 0 ;}
// Report the size of the kernel-argument struct to the caller.
void openmp_krook_collision_test_get_struct_len (size_t * len ){
((len)[0] = sizeof(openmp_krook_collision_test_struct ));
}
// One "compute unit" per available OpenMP thread.
int openmp_krook_collision_test_get_num_compute_units (openmp_krook_collision_test_struct * kerstr ){
return omp_get_max_threads ( ) ;}
// x-dimension width hint (IDX_OPT_MAX comes from the kernel headers).
int openmp_krook_collision_test_get_xlen (){
return IDX_OPT_MAX ;}
// Launch the krook_collision_test kernel over an x-by-y iteration space:
// y indices are distributed round-robin across OpenMP threads, x is swept
// serially inside each thread.
int openmp_krook_collision_test_exec (openmp_krook_collision_test_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int xid ;
int yid ;
int numt = omp_get_num_threads ( ) ;
int tid = omp_get_thread_num ( ) ;
// ysingle/ymin/ymax are computed for a blocked distribution but unused;
// the loop below uses a stride-numt round-robin distribution instead.
int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ;
int ymin = ( tid * ysingle ) ;
int ymax = ( ( 1 + tid ) * ysingle ) ;
for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt )))
{
for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 )))
{
openmp_krook_collision_test_scmc_kernel ( ( kerstr )->inoutput , ( kerstr )->xyzw , ( kerstr )->cu_cache , ( kerstr )->cu_xyzw , ( kerstr )->fieldE , ( kerstr )->fieldB , ( kerstr )->FoutJ , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->grid_cache_len)[0] , ( ( kerstr )->cu_cache_length)[0] , ( ( kerstr )->Mass0)[0] , ( ( kerstr )->Charge0)[0] , ( ( kerstr )->Deltat)[0] , ( ( kerstr )->mu_freq)[0] , yid , scmc_internal_g_ylen );
}}} return 0 ;}
int openmp_krook_collision_test_scmc_set_parameter_inoutput (openmp_krook_collision_test_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->inoutput = pm->d_data);
}
int openmp_krook_collision_test_scmc_set_parameter_xyzw (openmp_krook_collision_test_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->xyzw = pm->d_data);
}
int openmp_krook_collision_test_scmc_set_parameter_cu_cache (openmp_krook_collision_test_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cu_cache = pm->d_data);
}
int openmp_krook_collision_test_scmc_set_parameter_cu_xyzw (openmp_krook_collision_test_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cu_xyzw = pm->d_data);
}
int openmp_krook_collision_test_scmc_set_parameter_fieldE (openmp_krook_collision_test_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->fieldE = pm->d_data);
}
int openmp_krook_collision_test_scmc_set_parameter_fieldB (openmp_krook_collision_test_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->fieldB = pm->d_data);
}
int openmp_krook_collision_test_scmc_set_parameter_FoutJ (openmp_krook_collision_test_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->FoutJ = pm->d_data);
}
int openmp_krook_collision_test_scmc_set_parameter_XLEN (openmp_krook_collision_test_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->XLEN = pm->d_data);
}
int openmp_krook_collision_test_scmc_set_parameter_YLEN (openmp_krook_collision_test_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->YLEN = pm->d_data);
}
int openmp_krook_collision_test_scmc_set_parameter_ZLEN (openmp_krook_collision_test_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->ZLEN = pm->d_data);
}
int openmp_krook_collision_test_scmc_set_parameter_ovlp (openmp_krook_collision_test_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->ovlp = pm->d_data);
}
int openmp_krook_collision_test_scmc_set_parameter_numvec (openmp_krook_collision_test_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->numvec = pm->d_data);
}
int openmp_krook_collision_test_scmc_set_parameter_num_ele (openmp_krook_collision_test_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->num_ele = pm->d_data);
}
int openmp_krook_collision_test_scmc_set_parameter_grid_cache_len (openmp_krook_collision_test_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->grid_cache_len = pm->d_data);
}
int openmp_krook_collision_test_scmc_set_parameter_cu_cache_length (openmp_krook_collision_test_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cu_cache_length = pm->d_data);
}
int openmp_krook_collision_test_scmc_set_parameter_Mass0 (openmp_krook_collision_test_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Mass0 = pm->d_data);
}
int openmp_krook_collision_test_scmc_set_parameter_Charge0 (openmp_krook_collision_test_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Charge0 = pm->d_data);
}
int openmp_krook_collision_test_scmc_set_parameter_Deltat (openmp_krook_collision_test_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Deltat = pm->d_data);
}
int openmp_krook_collision_test_scmc_set_parameter_mu_freq (openmp_krook_collision_test_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->mu_freq = pm->d_data);
}
/* One-time setup hook for the krook_collision_remove_small_speed kernel.
 * The OpenMP backend needs no per-kernel initialization, so this simply
 * reports success; the kernel struct is populated by the set_parameter_*
 * binders. */
int openmp_krook_collision_remove_small_speed_init (openmp_pscmc_env * pe ,openmp_krook_collision_remove_small_speed_struct * kerstr ){
    (void) pe;      /* unused: no backend state to prepare */
    (void) kerstr;  /* unused: filled by set_parameter_* calls */
    return 0;
}
/* Report the size of the kernel's parameter struct through `len`. */
void openmp_krook_collision_remove_small_speed_get_struct_len (size_t * len ){
    *len = sizeof(openmp_krook_collision_remove_small_speed_struct);
}
/* Number of compute units for this backend = max OpenMP threads. */
int openmp_krook_collision_remove_small_speed_get_num_compute_units (openmp_krook_collision_remove_small_speed_struct * kerstr ){
    (void) kerstr;  /* unused: thread count does not depend on the struct */
    return omp_get_max_threads();
}
/* Preferred x-dimension length for this kernel (project-wide constant). */
int openmp_krook_collision_remove_small_speed_get_xlen (){
    return IDX_OPT_MAX;
}
/*
 * Execute the krook_collision_remove_small_speed kernel over a
 * g_xlen x g_ylen iteration space.  Rows (yid) are distributed round-robin
 * across the OpenMP team; each thread runs the full inner xid loop itself.
 * NOTE(review): xid is not passed to the kernel, so each row invokes the
 * kernel scmc_internal_g_xlen times with identical arguments -- presumably
 * the kernel advances state through its pointer arguments; confirm against
 * the code generator.
 * Fix: dropped the unused ysingle/ymin/ymax locals the generator emitted.
 * Always returns 0.
 */
int openmp_krook_collision_remove_small_speed_exec (openmp_krook_collision_remove_small_speed_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        int xid;
        int yid;
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (xid = 0; xid < scmc_internal_g_xlen; xid++) {
                openmp_krook_collision_remove_small_speed_scmc_kernel(
                    kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
                    kerstr->fieldE, kerstr->fieldB, kerstr->FoutJ,
                    kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0],
                    kerstr->ovlp[0], kerstr->numvec[0], kerstr->num_ele[0],
                    kerstr->grid_cache_len[0], kerstr->cu_cache_length[0],
                    kerstr->Mass0[0], kerstr->Charge0[0], kerstr->Deltat[0],
                    kerstr->mu_freq[0], yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/*
 * Generated parameter binders for the krook_collision_remove_small_speed
 * kernel: each stores the device-data pointer of `pm` into the matching
 * field of the kernel struct.  Fix: the originals were declared `int` but
 * fell off the end without a return statement (undefined behavior in C if
 * the caller reads the result); each setter now returns 0 for success.
 */
int openmp_krook_collision_remove_small_speed_scmc_set_parameter_inoutput (openmp_krook_collision_remove_small_speed_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->inoutput = pm->d_data; return 0; }
int openmp_krook_collision_remove_small_speed_scmc_set_parameter_xyzw (openmp_krook_collision_remove_small_speed_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->xyzw = pm->d_data; return 0; }
int openmp_krook_collision_remove_small_speed_scmc_set_parameter_cu_cache (openmp_krook_collision_remove_small_speed_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache = pm->d_data; return 0; }
int openmp_krook_collision_remove_small_speed_scmc_set_parameter_cu_xyzw (openmp_krook_collision_remove_small_speed_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_krook_collision_remove_small_speed_scmc_set_parameter_fieldE (openmp_krook_collision_remove_small_speed_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldE = pm->d_data; return 0; }
int openmp_krook_collision_remove_small_speed_scmc_set_parameter_fieldB (openmp_krook_collision_remove_small_speed_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldB = pm->d_data; return 0; }
int openmp_krook_collision_remove_small_speed_scmc_set_parameter_FoutJ (openmp_krook_collision_remove_small_speed_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->FoutJ = pm->d_data; return 0; }
int openmp_krook_collision_remove_small_speed_scmc_set_parameter_XLEN (openmp_krook_collision_remove_small_speed_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->XLEN = pm->d_data; return 0; }
int openmp_krook_collision_remove_small_speed_scmc_set_parameter_YLEN (openmp_krook_collision_remove_small_speed_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->YLEN = pm->d_data; return 0; }
int openmp_krook_collision_remove_small_speed_scmc_set_parameter_ZLEN (openmp_krook_collision_remove_small_speed_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ZLEN = pm->d_data; return 0; }
int openmp_krook_collision_remove_small_speed_scmc_set_parameter_ovlp (openmp_krook_collision_remove_small_speed_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ovlp = pm->d_data; return 0; }
int openmp_krook_collision_remove_small_speed_scmc_set_parameter_numvec (openmp_krook_collision_remove_small_speed_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->numvec = pm->d_data; return 0; }
int openmp_krook_collision_remove_small_speed_scmc_set_parameter_num_ele (openmp_krook_collision_remove_small_speed_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->num_ele = pm->d_data; return 0; }
int openmp_krook_collision_remove_small_speed_scmc_set_parameter_grid_cache_len (openmp_krook_collision_remove_small_speed_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_krook_collision_remove_small_speed_scmc_set_parameter_cu_cache_length (openmp_krook_collision_remove_small_speed_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_krook_collision_remove_small_speed_scmc_set_parameter_Mass0 (openmp_krook_collision_remove_small_speed_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Mass0 = pm->d_data; return 0; }
int openmp_krook_collision_remove_small_speed_scmc_set_parameter_Charge0 (openmp_krook_collision_remove_small_speed_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Charge0 = pm->d_data; return 0; }
int openmp_krook_collision_remove_small_speed_scmc_set_parameter_Deltat (openmp_krook_collision_remove_small_speed_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Deltat = pm->d_data; return 0; }
int openmp_krook_collision_remove_small_speed_scmc_set_parameter_mu_freq (openmp_krook_collision_remove_small_speed_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->mu_freq = pm->d_data; return 0; }
/* One-time setup hook for the boris_yee kernel.  The OpenMP backend needs
 * no per-kernel initialization, so this simply reports success; the kernel
 * struct is populated by the set_parameter_* binders. */
int openmp_boris_yee_init (openmp_pscmc_env * pe ,openmp_boris_yee_struct * kerstr ){
    (void) pe;      /* unused: no backend state to prepare */
    (void) kerstr;  /* unused: filled by set_parameter_* calls */
    return 0;
}
/* Report the size of the kernel's parameter struct through `len`. */
void openmp_boris_yee_get_struct_len (size_t * len ){
    *len = sizeof(openmp_boris_yee_struct);
}
/* Number of compute units for this backend = max OpenMP threads. */
int openmp_boris_yee_get_num_compute_units (openmp_boris_yee_struct * kerstr ){
    (void) kerstr;  /* unused: thread count does not depend on the struct */
    return omp_get_max_threads();
}
/* Preferred x-dimension length for this kernel (project-wide constant). */
int openmp_boris_yee_get_xlen (){
    return IDX_OPT_MAX;
}
/*
 * Execute the boris_yee kernel over a g_xlen x g_ylen iteration space.
 * Rows (yid) are distributed round-robin across the OpenMP team; each
 * thread runs the full inner xid loop itself.
 * NOTE(review): xid is not passed to the kernel, so each row invokes the
 * kernel scmc_internal_g_xlen times with identical arguments -- presumably
 * the kernel advances state through its pointer arguments; confirm against
 * the code generator.
 * Fix: dropped the unused ysingle/ymin/ymax locals the generator emitted.
 * Always returns 0.
 */
int openmp_boris_yee_exec (openmp_boris_yee_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        int xid;
        int yid;
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (xid = 0; xid < scmc_internal_g_xlen; xid++) {
                openmp_boris_yee_scmc_kernel(
                    kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
                    kerstr->fieldE, kerstr->fieldB, kerstr->FoutJ,
                    kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0],
                    kerstr->ovlp[0], kerstr->numvec[0], kerstr->num_ele[0],
                    kerstr->grid_cache_len[0], kerstr->cu_cache_length[0],
                    kerstr->Mass0[0], kerstr->Charge0[0], kerstr->Deltat[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/*
 * Generated parameter binders for the boris_yee kernel: each stores the
 * device-data pointer of `pm` into the matching field of the kernel struct.
 * Fix: the originals were declared `int` but fell off the end without a
 * return statement (undefined behavior in C if the caller reads the
 * result); each setter now returns 0 for success.
 */
int openmp_boris_yee_scmc_set_parameter_inoutput (openmp_boris_yee_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->inoutput = pm->d_data; return 0; }
int openmp_boris_yee_scmc_set_parameter_xyzw (openmp_boris_yee_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->xyzw = pm->d_data; return 0; }
int openmp_boris_yee_scmc_set_parameter_cu_cache (openmp_boris_yee_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache = pm->d_data; return 0; }
int openmp_boris_yee_scmc_set_parameter_cu_xyzw (openmp_boris_yee_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_boris_yee_scmc_set_parameter_fieldE (openmp_boris_yee_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldE = pm->d_data; return 0; }
int openmp_boris_yee_scmc_set_parameter_fieldB (openmp_boris_yee_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldB = pm->d_data; return 0; }
int openmp_boris_yee_scmc_set_parameter_FoutJ (openmp_boris_yee_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->FoutJ = pm->d_data; return 0; }
int openmp_boris_yee_scmc_set_parameter_XLEN (openmp_boris_yee_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->XLEN = pm->d_data; return 0; }
int openmp_boris_yee_scmc_set_parameter_YLEN (openmp_boris_yee_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->YLEN = pm->d_data; return 0; }
int openmp_boris_yee_scmc_set_parameter_ZLEN (openmp_boris_yee_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ZLEN = pm->d_data; return 0; }
int openmp_boris_yee_scmc_set_parameter_ovlp (openmp_boris_yee_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ovlp = pm->d_data; return 0; }
int openmp_boris_yee_scmc_set_parameter_numvec (openmp_boris_yee_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->numvec = pm->d_data; return 0; }
int openmp_boris_yee_scmc_set_parameter_num_ele (openmp_boris_yee_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->num_ele = pm->d_data; return 0; }
int openmp_boris_yee_scmc_set_parameter_grid_cache_len (openmp_boris_yee_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_boris_yee_scmc_set_parameter_cu_cache_length (openmp_boris_yee_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_boris_yee_scmc_set_parameter_Mass0 (openmp_boris_yee_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Mass0 = pm->d_data; return 0; }
int openmp_boris_yee_scmc_set_parameter_Charge0 (openmp_boris_yee_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Charge0 = pm->d_data; return 0; }
int openmp_boris_yee_scmc_set_parameter_Deltat (openmp_boris_yee_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Deltat = pm->d_data; return 0; }
/* One-time setup hook for the split_pass_E_particle_vlo kernel.  The OpenMP
 * backend needs no per-kernel initialization, so this simply reports
 * success; the kernel struct is populated by the set_parameter_* binders. */
int openmp_split_pass_E_particle_vlo_init (openmp_pscmc_env * pe ,openmp_split_pass_E_particle_vlo_struct * kerstr ){
    (void) pe;      /* unused: no backend state to prepare */
    (void) kerstr;  /* unused: filled by set_parameter_* calls */
    return 0;
}
/* Report the size of the kernel's parameter struct through `len`. */
void openmp_split_pass_E_particle_vlo_get_struct_len (size_t * len ){
    *len = sizeof(openmp_split_pass_E_particle_vlo_struct);
}
/* Number of compute units for this backend = max OpenMP threads. */
int openmp_split_pass_E_particle_vlo_get_num_compute_units (openmp_split_pass_E_particle_vlo_struct * kerstr ){
    (void) kerstr;  /* unused: thread count does not depend on the struct */
    return omp_get_max_threads();
}
/* Preferred x-dimension length for this kernel (project-wide constant). */
int openmp_split_pass_E_particle_vlo_get_xlen (){
    return IDX_OPT_MAX;
}
/*
 * Execute the split_pass_E_particle_vlo kernel over a g_xlen x g_ylen
 * iteration space.  Rows (yid) are distributed round-robin across the
 * OpenMP team; each thread runs the full inner xid loop itself.
 * NOTE(review): xid is not passed to the kernel, so each row invokes the
 * kernel scmc_internal_g_xlen times with identical arguments -- presumably
 * the kernel advances state through its pointer arguments; confirm against
 * the code generator.
 * Fix: dropped the unused ysingle/ymin/ymax locals the generator emitted.
 * Always returns 0.
 */
int openmp_split_pass_E_particle_vlo_exec (openmp_split_pass_E_particle_vlo_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        int xid;
        int yid;
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (xid = 0; xid < scmc_internal_g_xlen; xid++) {
                openmp_split_pass_E_particle_vlo_scmc_kernel(
                    kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
                    kerstr->fieldE, kerstr->fieldB, kerstr->FoutJ, kerstr->FoutEN,
                    kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0],
                    kerstr->ovlp[0], kerstr->numvec[0], kerstr->num_ele[0],
                    kerstr->grid_cache_len[0], kerstr->cu_cache_length[0],
                    kerstr->Mass[0], kerstr->Charge[0], kerstr->SPEC[0],
                    kerstr->NUM_SPEC[0], kerstr->DELTA_X[0], kerstr->DELTA_Y[0],
                    kerstr->DELTA_Z[0], kerstr->Deltat[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/*
 * Generated parameter binders for the split_pass_E_particle_vlo kernel:
 * each stores the device-data pointer of `pm` into the matching field of
 * the kernel struct.  Fix: the originals were declared `int` but fell off
 * the end without a return statement (undefined behavior in C if the caller
 * reads the result); each setter now returns 0 for success.
 */
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_inoutput (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->inoutput = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_xyzw (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->xyzw = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_cu_cache (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_cu_xyzw (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_fieldE (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldE = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_fieldB (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldB = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_FoutJ (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->FoutJ = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_FoutEN (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->FoutEN = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_XLEN (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->XLEN = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_YLEN (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->YLEN = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_ZLEN (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ZLEN = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_ovlp (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ovlp = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_numvec (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->numvec = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_num_ele (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->num_ele = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_grid_cache_len (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_cu_cache_length (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_Mass (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Mass = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_Charge (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Charge = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_SPEC (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->SPEC = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_NUM_SPEC (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->NUM_SPEC = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_DELTA_X (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_DELTA_Y (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_DELTA_Z (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_split_pass_E_particle_vlo_scmc_set_parameter_Deltat (openmp_split_pass_E_particle_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Deltat = pm->d_data; return 0; }
/* One-time setup hook for the split_pass_E_particle kernel.  The OpenMP
 * backend needs no per-kernel initialization, so this simply reports
 * success; the kernel struct is populated by the set_parameter_* binders. */
int openmp_split_pass_E_particle_init (openmp_pscmc_env * pe ,openmp_split_pass_E_particle_struct * kerstr ){
    (void) pe;      /* unused: no backend state to prepare */
    (void) kerstr;  /* unused: filled by set_parameter_* calls */
    return 0;
}
/* Report the size of the kernel's parameter struct through `len`. */
void openmp_split_pass_E_particle_get_struct_len (size_t * len ){
    *len = sizeof(openmp_split_pass_E_particle_struct);
}
/* Number of compute units for this backend = max OpenMP threads. */
int openmp_split_pass_E_particle_get_num_compute_units (openmp_split_pass_E_particle_struct * kerstr ){
    (void) kerstr;  /* unused: thread count does not depend on the struct */
    return omp_get_max_threads();
}
/* Preferred x-dimension length for this kernel (project-wide constant). */
int openmp_split_pass_E_particle_get_xlen (){
    return IDX_OPT_MAX;
}
/*
 * Execute the split_pass_E_particle kernel over a g_xlen x g_ylen iteration
 * space.  Rows (yid) are distributed round-robin across the OpenMP team;
 * each thread runs the full inner xid loop itself.
 * NOTE(review): xid is not passed to the kernel, so each row invokes the
 * kernel scmc_internal_g_xlen times with identical arguments -- presumably
 * the kernel advances state through its pointer arguments; confirm against
 * the code generator.
 * Fix: dropped the unused ysingle/ymin/ymax locals the generator emitted.
 * Always returns 0.
 */
int openmp_split_pass_E_particle_exec (openmp_split_pass_E_particle_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        int xid;
        int yid;
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (xid = 0; xid < scmc_internal_g_xlen; xid++) {
                openmp_split_pass_E_particle_scmc_kernel(
                    kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
                    kerstr->fieldE, kerstr->fieldB, kerstr->FoutJ, kerstr->FoutEN,
                    kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0],
                    kerstr->ovlp[0], kerstr->numvec[0], kerstr->num_ele[0],
                    kerstr->grid_cache_len[0], kerstr->cu_cache_length[0],
                    kerstr->Mass[0], kerstr->Charge[0], kerstr->SPEC[0],
                    kerstr->NUM_SPEC[0], kerstr->DELTA_X[0], kerstr->DELTA_Y[0],
                    kerstr->DELTA_Z[0], kerstr->Deltat[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/*
 * Generated parameter binders for the split_pass_E_particle kernel: each
 * stores the device-data pointer of `pm` into the matching field of the
 * kernel struct.  Fix: the originals were declared `int` but fell off the
 * end without a return statement (undefined behavior in C if the caller
 * reads the result); each setter now returns 0 for success.
 */
int openmp_split_pass_E_particle_scmc_set_parameter_inoutput (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->inoutput = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_xyzw (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->xyzw = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_cu_cache (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_cu_xyzw (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_fieldE (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldE = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_fieldB (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldB = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_FoutJ (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->FoutJ = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_FoutEN (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->FoutEN = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_XLEN (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->XLEN = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_YLEN (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->YLEN = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_ZLEN (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ZLEN = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_ovlp (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ovlp = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_numvec (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->numvec = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_num_ele (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->num_ele = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_grid_cache_len (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_cu_cache_length (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_Mass (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Mass = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_Charge (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Charge = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_SPEC (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->SPEC = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_NUM_SPEC (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->NUM_SPEC = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_DELTA_X (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_DELTA_Y (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_DELTA_Z (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_split_pass_E_particle_scmc_set_parameter_Deltat (openmp_split_pass_E_particle_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Deltat = pm->d_data; return 0; }
/* One-time setup hook for the split_pass_z_vlo_sg2_nopush_small_grids
 * kernel.  The OpenMP backend needs no per-kernel initialization, so this
 * simply reports success; the kernel struct is populated by the
 * set_parameter_* binders. */
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_init (openmp_pscmc_env * pe ,openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ){
    (void) pe;      /* unused: no backend state to prepare */
    (void) kerstr;  /* unused: filled by set_parameter_* calls */
    return 0;
}
/* Report the size of the kernel's parameter struct through `len`. */
void openmp_split_pass_z_vlo_sg2_nopush_small_grids_get_struct_len (size_t * len ){
    *len = sizeof(openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct);
}
/* Number of compute units for this backend = max OpenMP threads. */
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_get_num_compute_units (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ){
    (void) kerstr;  /* unused: thread count does not depend on the struct */
    return omp_get_max_threads();
}
/* Preferred x-dimension length: fixed to a single x iteration for this
 * small-grids variant (unlike the IDX_OPT_MAX kernels above). */
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_get_xlen (){
    return 1;
}
/*
 * Execute the split_pass_z_vlo_sg2_nopush_small_grids kernel over a
 * g_xlen x g_ylen iteration space.  Rows (yid) are distributed round-robin
 * across the OpenMP team; each thread runs the full inner xid loop itself.
 * This variant writes the current through the LFoutJ field (not FoutJ).
 * NOTE(review): xid is not passed to the kernel, so each row invokes the
 * kernel scmc_internal_g_xlen times with identical arguments -- presumably
 * the kernel advances state through its pointer arguments; confirm against
 * the code generator.
 * Fix: dropped the unused ysingle/ymin/ymax locals the generator emitted.
 * Always returns 0.
 */
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_exec (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        int xid;
        int yid;
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (xid = 0; xid < scmc_internal_g_xlen; xid++) {
                openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_kernel(
                    kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
                    kerstr->fieldE, kerstr->fieldB, kerstr->LFoutJ, kerstr->FoutEN,
                    kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0],
                    kerstr->ovlp[0], kerstr->numvec[0], kerstr->num_ele[0],
                    kerstr->grid_cache_len[0], kerstr->cu_cache_length[0],
                    kerstr->Mass[0], kerstr->Charge[0], kerstr->SPEC[0],
                    kerstr->NUM_SPEC[0], kerstr->DELTA_X[0], kerstr->DELTA_Y[0],
                    kerstr->DELTA_Z[0], kerstr->Deltat[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/*
 * Generated parameter binders for the split_pass_z_vlo_sg2_nopush_small_grids
 * kernel: each stores the device-data pointer of `pm` into the matching
 * field of the kernel struct.  Fix: the originals were declared `int` but
 * fell off the end without a return statement (undefined behavior in C if
 * the caller reads the result); each setter now returns 0 for success.
 */
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_inoutput (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->inoutput = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_xyzw (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->xyzw = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_cu_cache (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_cu_xyzw (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_fieldE (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldE = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_fieldB (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldB = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_LFoutJ (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->LFoutJ = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_FoutEN (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->FoutEN = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_XLEN (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->XLEN = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_YLEN (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->YLEN = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_ZLEN (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ZLEN = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_ovlp (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ovlp = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_numvec (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->numvec = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_num_ele (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->num_ele = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_grid_cache_len (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_cu_cache_length (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_Mass (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Mass = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_Charge (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Charge = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_SPEC (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->SPEC = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_NUM_SPEC (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->NUM_SPEC = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_DELTA_X (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_DELTA_Y (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_DELTA_Z (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_split_pass_z_vlo_sg2_nopush_small_grids_scmc_set_parameter_Deltat (openmp_split_pass_z_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Deltat = pm->d_data; return 0; }
/* One-time setup hook for the split_pass_z_vlo_sg2_small_grids kernel.
 * The OpenMP backend needs no per-kernel initialization, so this simply
 * reports success; the kernel struct is populated by the set_parameter_*
 * binders. */
int openmp_split_pass_z_vlo_sg2_small_grids_init (openmp_pscmc_env * pe ,openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ){
    (void) pe;      /* unused: no backend state to prepare */
    (void) kerstr;  /* unused: filled by set_parameter_* calls */
    return 0;
}
/* Report the size of the kernel's parameter struct through `len`. */
void openmp_split_pass_z_vlo_sg2_small_grids_get_struct_len (size_t * len ){
    *len = sizeof(openmp_split_pass_z_vlo_sg2_small_grids_struct);
}
/* Number of compute units for this backend = max OpenMP threads. */
int openmp_split_pass_z_vlo_sg2_small_grids_get_num_compute_units (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ){
    (void) kerstr;  /* unused: thread count does not depend on the struct */
    return omp_get_max_threads();
}
/* Preferred x-dimension length: fixed to a single x iteration for this
 * small-grids variant (unlike the IDX_OPT_MAX kernels above). */
int openmp_split_pass_z_vlo_sg2_small_grids_get_xlen (){
    return 1;
}
/*
 * Execute the split_pass_z_vlo_sg2_small_grids kernel over a
 * g_xlen x g_ylen iteration space.  Rows (yid) are distributed round-robin
 * across the OpenMP team; each thread runs the full inner xid loop itself.
 * This variant writes the current through the LFoutJ field (not FoutJ).
 * NOTE(review): xid is not passed to the kernel, so each row invokes the
 * kernel scmc_internal_g_xlen times with identical arguments -- presumably
 * the kernel advances state through its pointer arguments; confirm against
 * the code generator.
 * Fix: dropped the unused ysingle/ymin/ymax locals the generator emitted.
 * Always returns 0.
 */
int openmp_split_pass_z_vlo_sg2_small_grids_exec (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        int xid;
        int yid;
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (xid = 0; xid < scmc_internal_g_xlen; xid++) {
                openmp_split_pass_z_vlo_sg2_small_grids_scmc_kernel(
                    kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
                    kerstr->fieldE, kerstr->fieldB, kerstr->LFoutJ, kerstr->FoutEN,
                    kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0],
                    kerstr->ovlp[0], kerstr->numvec[0], kerstr->num_ele[0],
                    kerstr->grid_cache_len[0], kerstr->cu_cache_length[0],
                    kerstr->Mass[0], kerstr->Charge[0], kerstr->SPEC[0],
                    kerstr->NUM_SPEC[0], kerstr->DELTA_X[0], kerstr->DELTA_Y[0],
                    kerstr->DELTA_Z[0], kerstr->Deltat[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/*
 * Generated parameter setters for the split_pass_z_vlo_sg2_small_grids
 * kernel: each binds the device buffer `pm->d_data` to the matching slot of
 * the kernel argument struct.  The generator declared them `int` but omitted
 * the return statement (UB if the result is read); each now returns 0.
 */
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_inoutput (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inoutput = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_xyzw (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xyzw = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_cu_cache (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_cu_xyzw (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_xyzw = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_fieldE (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldE = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_fieldB (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_LFoutJ (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->LFoutJ = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_FoutEN (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->FoutEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_XLEN (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_YLEN (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_ZLEN (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_ovlp (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_numvec (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_num_ele (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_grid_cache_len (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->grid_cache_len = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_cu_cache_length (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache_length = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_Mass (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Mass = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_Charge (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Charge = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_SPEC (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->SPEC = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_NUM_SPEC (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->NUM_SPEC = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_DELTA_X (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_X = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_DELTA_Y (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Y = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_DELTA_Z (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Z = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_sg2_small_grids_scmc_set_parameter_Deltat (openmp_split_pass_z_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Deltat = pm->d_data;
    return 0;
}
/* One-time initialisation hook for the split_pass_z_vlo_small_grids kernel;
 * nothing to set up in the OpenMP backend, so it just reports success (0). */
int openmp_split_pass_z_vlo_small_grids_init (openmp_pscmc_env * pe ,openmp_split_pass_z_vlo_small_grids_struct * kerstr ){
return 0 ;}
/* Report the size of this kernel's argument struct via len[0]. */
void openmp_split_pass_z_vlo_small_grids_get_struct_len (size_t * len ){
((len)[0] = sizeof(openmp_split_pass_z_vlo_small_grids_struct ));
}
/* Compute units for this backend = maximum OpenMP threads (kerstr unused,
 * kept for interface uniformity). */
int openmp_split_pass_z_vlo_small_grids_get_num_compute_units (openmp_split_pass_z_vlo_small_grids_struct * kerstr ){
return omp_get_max_threads ( ) ;}
/* x-dimension vector length of this kernel variant: scalar, i.e. 1. */
int openmp_split_pass_z_vlo_small_grids_get_xlen (){
return 1 ;}
/*
 * Execute the split_pass_z_vlo_small_grids kernel over the
 * scmc_internal_g_xlen x scmc_internal_g_ylen index space.  y rows are
 * dealt out round-robin across the OpenMP threads; each thread walks every
 * x index for its rows.  The generated ysingle/ymin/ymax block-partition
 * locals were never used and have been removed.  Always returns 0.
 */
int openmp_split_pass_z_vlo_small_grids_exec (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    /* long, not int: the loop bounds are long, so an int counter could
     * overflow on very large extents. */
    long xid;
    long yid;
    for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
    {
        for (xid = 0; xid < scmc_internal_g_xlen; xid += 1)
        {
            openmp_split_pass_z_vlo_small_grids_scmc_kernel ( ( kerstr )->inoutput , ( kerstr )->xyzw , ( kerstr )->cu_cache , ( kerstr )->cu_xyzw , ( kerstr )->fieldE , ( kerstr )->fieldB , ( kerstr )->LFoutJ , ( kerstr )->FoutEN , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->grid_cache_len)[0] , ( ( kerstr )->cu_cache_length)[0] , ( ( kerstr )->Mass)[0] , ( ( kerstr )->Charge)[0] , ( ( kerstr )->SPEC)[0] , ( ( kerstr )->NUM_SPEC)[0] , ( ( kerstr )->DELTA_X)[0] , ( ( kerstr )->DELTA_Y)[0] , ( ( kerstr )->DELTA_Z)[0] , ( ( kerstr )->Deltat)[0] , yid , scmc_internal_g_ylen );
        }
    }
}
return 0;
}
/*
 * Generated parameter setters for the split_pass_z_vlo_small_grids kernel:
 * each binds the device buffer `pm->d_data` to the matching slot of the
 * kernel argument struct.  The generator declared them `int` but omitted
 * the return statement (UB if the result is read); each now returns 0.
 */
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_inoutput (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inoutput = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_xyzw (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xyzw = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_cu_cache (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_cu_xyzw (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_xyzw = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_fieldE (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldE = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_fieldB (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_LFoutJ (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->LFoutJ = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_FoutEN (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->FoutEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_XLEN (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_YLEN (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_ZLEN (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_ovlp (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_numvec (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_num_ele (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_grid_cache_len (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->grid_cache_len = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_cu_cache_length (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache_length = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_Mass (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Mass = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_Charge (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Charge = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_SPEC (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->SPEC = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_NUM_SPEC (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->NUM_SPEC = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_DELTA_X (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_X = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_DELTA_Y (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Y = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_DELTA_Z (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Z = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_small_grids_scmc_set_parameter_Deltat (openmp_split_pass_z_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Deltat = pm->d_data;
    return 0;
}
/* One-time initialisation hook for the split_pass_z_sg2_small_grids kernel;
 * nothing to set up in the OpenMP backend, so it just reports success (0). */
int openmp_split_pass_z_sg2_small_grids_init (openmp_pscmc_env * pe ,openmp_split_pass_z_sg2_small_grids_struct * kerstr ){
return 0 ;}
/* Report the size of this kernel's argument struct via len[0]. */
void openmp_split_pass_z_sg2_small_grids_get_struct_len (size_t * len ){
((len)[0] = sizeof(openmp_split_pass_z_sg2_small_grids_struct ));
}
/* Compute units for this backend = maximum OpenMP threads (kerstr unused,
 * kept for interface uniformity). */
int openmp_split_pass_z_sg2_small_grids_get_num_compute_units (openmp_split_pass_z_sg2_small_grids_struct * kerstr ){
return omp_get_max_threads ( ) ;}
/* x-dimension vector length of this kernel variant: scalar, i.e. 1. */
int openmp_split_pass_z_sg2_small_grids_get_xlen (){
return 1 ;}
/*
 * Execute the split_pass_z_sg2_small_grids kernel over the
 * scmc_internal_g_xlen x scmc_internal_g_ylen index space.  y rows are
 * dealt out round-robin across the OpenMP threads; each thread walks every
 * x index for its rows.  The generated ysingle/ymin/ymax block-partition
 * locals were never used and have been removed.  Always returns 0.
 */
int openmp_split_pass_z_sg2_small_grids_exec (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    /* long, not int: the loop bounds are long, so an int counter could
     * overflow on very large extents. */
    long xid;
    long yid;
    for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
    {
        for (xid = 0; xid < scmc_internal_g_xlen; xid += 1)
        {
            openmp_split_pass_z_sg2_small_grids_scmc_kernel ( ( kerstr )->inoutput , ( kerstr )->xyzw , ( kerstr )->cu_cache , ( kerstr )->cu_xyzw , ( kerstr )->fieldE , ( kerstr )->fieldB , ( kerstr )->LFoutJ , ( kerstr )->FoutEN , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->grid_cache_len)[0] , ( ( kerstr )->cu_cache_length)[0] , ( ( kerstr )->Mass)[0] , ( ( kerstr )->Charge)[0] , ( ( kerstr )->SPEC)[0] , ( ( kerstr )->NUM_SPEC)[0] , ( ( kerstr )->DELTA_X)[0] , ( ( kerstr )->DELTA_Y)[0] , ( ( kerstr )->DELTA_Z)[0] , ( ( kerstr )->Deltat)[0] , yid , scmc_internal_g_ylen );
        }
    }
}
return 0;
}
/*
 * Generated parameter setters for the split_pass_z_sg2_small_grids kernel:
 * each binds the device buffer `pm->d_data` to the matching slot of the
 * kernel argument struct.  The generator declared them `int` but omitted
 * the return statement (UB if the result is read); each now returns 0.
 */
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_inoutput (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inoutput = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_xyzw (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xyzw = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_cu_cache (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_cu_xyzw (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_xyzw = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_fieldE (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldE = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_fieldB (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_LFoutJ (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->LFoutJ = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_FoutEN (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->FoutEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_XLEN (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_YLEN (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_ZLEN (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_ovlp (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_numvec (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_num_ele (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_grid_cache_len (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->grid_cache_len = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_cu_cache_length (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache_length = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_Mass (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Mass = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_Charge (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Charge = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_SPEC (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->SPEC = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_NUM_SPEC (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->NUM_SPEC = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_DELTA_X (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_X = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_DELTA_Y (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Y = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_DELTA_Z (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Z = pm->d_data;
    return 0;
}
int openmp_split_pass_z_sg2_small_grids_scmc_set_parameter_Deltat (openmp_split_pass_z_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Deltat = pm->d_data;
    return 0;
}
/* One-time initialisation hook for the split_pass_z_small_grids kernel;
 * nothing to set up in the OpenMP backend, so it just reports success (0). */
int openmp_split_pass_z_small_grids_init (openmp_pscmc_env * pe ,openmp_split_pass_z_small_grids_struct * kerstr ){
return 0 ;}
/* Report the size of this kernel's argument struct via len[0]. */
void openmp_split_pass_z_small_grids_get_struct_len (size_t * len ){
((len)[0] = sizeof(openmp_split_pass_z_small_grids_struct ));
}
/* Compute units for this backend = maximum OpenMP threads (kerstr unused,
 * kept for interface uniformity). */
int openmp_split_pass_z_small_grids_get_num_compute_units (openmp_split_pass_z_small_grids_struct * kerstr ){
return omp_get_max_threads ( ) ;}
/* x-dimension vector length of this kernel variant: scalar, i.e. 1. */
int openmp_split_pass_z_small_grids_get_xlen (){
return 1 ;}
/*
 * Execute the split_pass_z_small_grids kernel over the
 * scmc_internal_g_xlen x scmc_internal_g_ylen index space.  y rows are
 * dealt out round-robin across the OpenMP threads; each thread walks every
 * x index for its rows.  The generated ysingle/ymin/ymax block-partition
 * locals were never used and have been removed.  Always returns 0.
 */
int openmp_split_pass_z_small_grids_exec (openmp_split_pass_z_small_grids_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    /* long, not int: the loop bounds are long, so an int counter could
     * overflow on very large extents. */
    long xid;
    long yid;
    for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
    {
        for (xid = 0; xid < scmc_internal_g_xlen; xid += 1)
        {
            openmp_split_pass_z_small_grids_scmc_kernel ( ( kerstr )->inoutput , ( kerstr )->xyzw , ( kerstr )->cu_cache , ( kerstr )->cu_xyzw , ( kerstr )->fieldE , ( kerstr )->fieldB , ( kerstr )->LFoutJ , ( kerstr )->FoutEN , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->grid_cache_len)[0] , ( ( kerstr )->cu_cache_length)[0] , ( ( kerstr )->Mass)[0] , ( ( kerstr )->Charge)[0] , ( ( kerstr )->SPEC)[0] , ( ( kerstr )->NUM_SPEC)[0] , ( ( kerstr )->DELTA_X)[0] , ( ( kerstr )->DELTA_Y)[0] , ( ( kerstr )->DELTA_Z)[0] , ( ( kerstr )->Deltat)[0] , yid , scmc_internal_g_ylen );
        }
    }
}
return 0;
}
/*
 * Generated parameter setters for the split_pass_z_small_grids kernel:
 * each binds the device buffer `pm->d_data` to the matching slot of the
 * kernel argument struct.  The generator declared them `int` but omitted
 * the return statement (UB if the result is read); each now returns 0.
 */
int openmp_split_pass_z_small_grids_scmc_set_parameter_inoutput (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inoutput = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_xyzw (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xyzw = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_cu_cache (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_cu_xyzw (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_xyzw = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_fieldE (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldE = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_fieldB (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_LFoutJ (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->LFoutJ = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_FoutEN (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->FoutEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_XLEN (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_YLEN (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_ZLEN (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_ovlp (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_numvec (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_num_ele (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_grid_cache_len (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->grid_cache_len = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_cu_cache_length (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache_length = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_Mass (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Mass = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_Charge (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Charge = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_SPEC (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->SPEC = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_NUM_SPEC (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->NUM_SPEC = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_DELTA_X (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_X = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_DELTA_Y (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Y = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_DELTA_Z (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Z = pm->d_data;
    return 0;
}
int openmp_split_pass_z_small_grids_scmc_set_parameter_Deltat (openmp_split_pass_z_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Deltat = pm->d_data;
    return 0;
}
/* One-time initialisation hook for the split_pass_z_vlo_nopush kernel;
 * nothing to set up in the OpenMP backend, so it just reports success (0). */
int openmp_split_pass_z_vlo_nopush_init (openmp_pscmc_env * pe ,openmp_split_pass_z_vlo_nopush_struct * kerstr ){
return 0 ;}
/* Report the size of this kernel's argument struct via len[0]. */
void openmp_split_pass_z_vlo_nopush_get_struct_len (size_t * len ){
((len)[0] = sizeof(openmp_split_pass_z_vlo_nopush_struct ));
}
/* Compute units for this backend = maximum OpenMP threads (kerstr unused,
 * kept for interface uniformity). */
int openmp_split_pass_z_vlo_nopush_get_num_compute_units (openmp_split_pass_z_vlo_nopush_struct * kerstr ){
return omp_get_max_threads ( ) ;}
/* x-dimension vector length of this (vectorised) kernel variant:
 * IDX_OPT_MAX, a project-defined constant. */
int openmp_split_pass_z_vlo_nopush_get_xlen (){
return IDX_OPT_MAX ;}
/*
 * Execute the split_pass_z_vlo_nopush kernel over the
 * scmc_internal_g_xlen x scmc_internal_g_ylen index space.  y rows are
 * dealt out round-robin across the OpenMP threads; each thread walks every
 * x index for its rows.  The generated ysingle/ymin/ymax block-partition
 * locals were never used and have been removed.  Always returns 0.
 */
int openmp_split_pass_z_vlo_nopush_exec (openmp_split_pass_z_vlo_nopush_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    /* long, not int: the loop bounds are long, so an int counter could
     * overflow on very large extents. */
    long xid;
    long yid;
    for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
    {
        for (xid = 0; xid < scmc_internal_g_xlen; xid += 1)
        {
            openmp_split_pass_z_vlo_nopush_scmc_kernel ( ( kerstr )->inoutput , ( kerstr )->xyzw , ( kerstr )->cu_cache , ( kerstr )->cu_xyzw , ( kerstr )->fieldE , ( kerstr )->fieldB , ( kerstr )->FoutJ , ( kerstr )->FoutEN , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->grid_cache_len)[0] , ( ( kerstr )->cu_cache_length)[0] , ( ( kerstr )->Mass)[0] , ( ( kerstr )->Charge)[0] , ( ( kerstr )->SPEC)[0] , ( ( kerstr )->NUM_SPEC)[0] , ( ( kerstr )->DELTA_X)[0] , ( ( kerstr )->DELTA_Y)[0] , ( ( kerstr )->DELTA_Z)[0] , ( ( kerstr )->Deltat)[0] , yid , scmc_internal_g_ylen );
        }
    }
}
return 0;
}
/*
 * Generated parameter setters for the split_pass_z_vlo_nopush kernel:
 * each binds the device buffer `pm->d_data` to the matching slot of the
 * kernel argument struct.  The generator declared them `int` but omitted
 * the return statement (UB if the result is read); each now returns 0.
 */
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_inoutput (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inoutput = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_xyzw (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xyzw = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_cu_cache (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_cu_xyzw (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_xyzw = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_fieldE (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldE = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_fieldB (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_FoutJ (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->FoutJ = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_FoutEN (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->FoutEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_XLEN (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_YLEN (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_ZLEN (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_ovlp (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_numvec (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_num_ele (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_grid_cache_len (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->grid_cache_len = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_cu_cache_length (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache_length = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_Mass (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Mass = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_Charge (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Charge = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_SPEC (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->SPEC = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_NUM_SPEC (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->NUM_SPEC = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_DELTA_X (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_X = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_DELTA_Y (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Y = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_DELTA_Z (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Z = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_nopush_scmc_set_parameter_Deltat (openmp_split_pass_z_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Deltat = pm->d_data;
    return 0;
}
/* One-time initialisation hook for the split_pass_z_vlo kernel; nothing to
 * set up in the OpenMP backend, so it just reports success (0). */
int openmp_split_pass_z_vlo_init (openmp_pscmc_env * pe ,openmp_split_pass_z_vlo_struct * kerstr ){
return 0 ;}
/* Report the size of this kernel's argument struct via len[0]. */
void openmp_split_pass_z_vlo_get_struct_len (size_t * len ){
((len)[0] = sizeof(openmp_split_pass_z_vlo_struct ));
}
/* Compute units for this backend = maximum OpenMP threads (kerstr unused,
 * kept for interface uniformity). */
int openmp_split_pass_z_vlo_get_num_compute_units (openmp_split_pass_z_vlo_struct * kerstr ){
return omp_get_max_threads ( ) ;}
/* x-dimension vector length of this (vectorised) kernel variant:
 * IDX_OPT_MAX, a project-defined constant. */
int openmp_split_pass_z_vlo_get_xlen (){
return IDX_OPT_MAX ;}
/*
 * Execute the split_pass_z_vlo kernel over the
 * scmc_internal_g_xlen x scmc_internal_g_ylen index space.  y rows are
 * dealt out round-robin across the OpenMP threads; each thread walks every
 * x index for its rows.  The generated ysingle/ymin/ymax block-partition
 * locals were never used and have been removed.  Always returns 0.
 */
int openmp_split_pass_z_vlo_exec (openmp_split_pass_z_vlo_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    /* long, not int: the loop bounds are long, so an int counter could
     * overflow on very large extents. */
    long xid;
    long yid;
    for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
    {
        for (xid = 0; xid < scmc_internal_g_xlen; xid += 1)
        {
            openmp_split_pass_z_vlo_scmc_kernel ( ( kerstr )->inoutput , ( kerstr )->xyzw , ( kerstr )->cu_cache , ( kerstr )->cu_xyzw , ( kerstr )->fieldE , ( kerstr )->fieldB , ( kerstr )->FoutJ , ( kerstr )->FoutEN , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->grid_cache_len)[0] , ( ( kerstr )->cu_cache_length)[0] , ( ( kerstr )->Mass)[0] , ( ( kerstr )->Charge)[0] , ( ( kerstr )->SPEC)[0] , ( ( kerstr )->NUM_SPEC)[0] , ( ( kerstr )->DELTA_X)[0] , ( ( kerstr )->DELTA_Y)[0] , ( ( kerstr )->DELTA_Z)[0] , ( ( kerstr )->Deltat)[0] , yid , scmc_internal_g_ylen );
        }
    }
}
return 0;
}
/*
 * Generated parameter setters for the split_pass_z_vlo kernel: each binds
 * the device buffer `pm->d_data` to the matching slot of the kernel
 * argument struct.  The generator declared them `int` but omitted the
 * return statement (UB if the result is read); each now returns 0.
 */
int openmp_split_pass_z_vlo_scmc_set_parameter_inoutput (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inoutput = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_scmc_set_parameter_xyzw (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xyzw = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_scmc_set_parameter_cu_cache (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_scmc_set_parameter_cu_xyzw (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_xyzw = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_scmc_set_parameter_fieldE (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldE = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_scmc_set_parameter_fieldB (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_scmc_set_parameter_FoutJ (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->FoutJ = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_scmc_set_parameter_FoutEN (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->FoutEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_scmc_set_parameter_XLEN (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_scmc_set_parameter_YLEN (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_scmc_set_parameter_ZLEN (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_scmc_set_parameter_ovlp (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_scmc_set_parameter_numvec (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_scmc_set_parameter_num_ele (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
int openmp_split_pass_z_vlo_scmc_set_parameter_grid_cache_len (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->grid_cache_len = pm->d_data);
}
int openmp_split_pass_z_vlo_scmc_set_parameter_cu_cache_length (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cu_cache_length = pm->d_data);
}
int openmp_split_pass_z_vlo_scmc_set_parameter_Mass (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Mass = pm->d_data);
}
int openmp_split_pass_z_vlo_scmc_set_parameter_Charge (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Charge = pm->d_data);
}
int openmp_split_pass_z_vlo_scmc_set_parameter_SPEC (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->SPEC = pm->d_data);
}
int openmp_split_pass_z_vlo_scmc_set_parameter_NUM_SPEC (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->NUM_SPEC = pm->d_data);
}
int openmp_split_pass_z_vlo_scmc_set_parameter_DELTA_X (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DELTA_X = pm->d_data);
}
int openmp_split_pass_z_vlo_scmc_set_parameter_DELTA_Y (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DELTA_Y = pm->d_data);
}
int openmp_split_pass_z_vlo_scmc_set_parameter_DELTA_Z (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DELTA_Z = pm->d_data);
}
int openmp_split_pass_z_vlo_scmc_set_parameter_Deltat (openmp_split_pass_z_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Deltat = pm->d_data);
}
/* --- openmp_split_pass_z_nopush: backend query helpers --- */

/* No per-kernel setup is needed by this OpenMP backend; always succeeds. */
int openmp_split_pass_z_nopush_init (openmp_pscmc_env * pe ,openmp_split_pass_z_nopush_struct * kerstr )
{
    (void) pe;
    (void) kerstr;
    return 0;
}

/* Report the size of the kernel-argument struct through *len. */
void openmp_split_pass_z_nopush_get_struct_len (size_t * len )
{
    *len = sizeof(openmp_split_pass_z_nopush_struct);
}

/* One "compute unit" per OpenMP thread available to the runtime. */
int openmp_split_pass_z_nopush_get_num_compute_units (openmp_split_pass_z_nopush_struct * kerstr )
{
    (void) kerstr;
    return omp_get_max_threads();
}

/* Preferred x-dimension work size for this kernel. */
int openmp_split_pass_z_nopush_get_xlen ()
{
    return IDX_OPT_MAX;
}
/*
 * Run the split_pass_z_nopush kernel over an xlen x ylen index space.
 * Rows (yid) are dealt round-robin across the OpenMP threads; each thread
 * then sweeps every xid column of its rows, passing the bound buffers and
 * scalar parameters from kerstr to the kernel.  Always returns 0.
 *
 * Fix: the generated ysingle/ymin/ymax locals were computed per thread but
 * never used (the loop is round-robin, not block-partitioned); the dead
 * code is removed.
 */
int openmp_split_pass_z_nopush_exec (openmp_split_pass_z_nopush_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        int xid;
        int yid;
        /* Thread t processes rows t, t+numt, t+2*numt, ... */
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
        {
            for (xid = 0; xid < scmc_internal_g_xlen; xid += 1)
            {
                openmp_split_pass_z_nopush_scmc_kernel ( kerstr->inoutput , kerstr->xyzw , kerstr->cu_cache , kerstr->cu_xyzw , kerstr->fieldE , kerstr->fieldB , kerstr->FoutJ , kerstr->FoutEN , kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] , kerstr->ovlp[0] , kerstr->numvec[0] , kerstr->num_ele[0] , kerstr->grid_cache_len[0] , kerstr->cu_cache_length[0] , kerstr->Mass[0] , kerstr->Charge[0] , kerstr->SPEC[0] , kerstr->NUM_SPEC[0] , kerstr->DELTA_X[0] , kerstr->DELTA_Y[0] , kerstr->DELTA_Z[0] , kerstr->Deltat[0] , yid , scmc_internal_g_ylen );
            }
        }
    }
    return 0;
}
/*
 * Parameter binders for openmp_split_pass_z_nopush: each stores the device
 * buffer pointer pm->d_data into the matching field of the kernel-argument
 * struct.  Fix: the generated functions were declared int but fell off the
 * end without a return statement — using such a return value is undefined
 * behavior in C99+ — so each binder now explicitly returns 0 (success).
 */
int openmp_split_pass_z_nopush_scmc_set_parameter_inoutput (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->inoutput = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_xyzw (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->xyzw = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_cu_cache (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_cu_xyzw (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_fieldE (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldE = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_fieldB (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldB = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_FoutJ (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->FoutJ = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_FoutEN (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->FoutEN = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_XLEN (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->XLEN = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_YLEN (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->YLEN = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_ZLEN (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ZLEN = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_ovlp (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ovlp = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_numvec (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->numvec = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_num_ele (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->num_ele = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_grid_cache_len (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_cu_cache_length (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_Mass (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Mass = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_Charge (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Charge = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_SPEC (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->SPEC = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_NUM_SPEC (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->NUM_SPEC = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_DELTA_X (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_DELTA_Y (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_DELTA_Z (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_split_pass_z_nopush_scmc_set_parameter_Deltat (openmp_split_pass_z_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Deltat = pm->d_data; return 0; }
/* --- openmp_split_pass_z: backend query helpers --- */

/* No per-kernel setup is needed by this OpenMP backend; always succeeds. */
int openmp_split_pass_z_init (openmp_pscmc_env * pe ,openmp_split_pass_z_struct * kerstr )
{
    (void) pe;
    (void) kerstr;
    return 0;
}

/* Report the size of the kernel-argument struct through *len. */
void openmp_split_pass_z_get_struct_len (size_t * len )
{
    *len = sizeof(openmp_split_pass_z_struct);
}

/* One "compute unit" per OpenMP thread available to the runtime. */
int openmp_split_pass_z_get_num_compute_units (openmp_split_pass_z_struct * kerstr )
{
    (void) kerstr;
    return omp_get_max_threads();
}

/* Preferred x-dimension work size for this kernel. */
int openmp_split_pass_z_get_xlen ()
{
    return IDX_OPT_MAX;
}
/*
 * Run the split_pass_z kernel over an xlen x ylen index space.
 * Rows (yid) are dealt round-robin across the OpenMP threads; each thread
 * then sweeps every xid column of its rows, passing the bound buffers and
 * scalar parameters from kerstr to the kernel.  Always returns 0.
 *
 * Fix: the generated ysingle/ymin/ymax locals were computed per thread but
 * never used (the loop is round-robin, not block-partitioned); the dead
 * code is removed.
 */
int openmp_split_pass_z_exec (openmp_split_pass_z_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        int xid;
        int yid;
        /* Thread t processes rows t, t+numt, t+2*numt, ... */
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
        {
            for (xid = 0; xid < scmc_internal_g_xlen; xid += 1)
            {
                openmp_split_pass_z_scmc_kernel ( kerstr->inoutput , kerstr->xyzw , kerstr->cu_cache , kerstr->cu_xyzw , kerstr->fieldE , kerstr->fieldB , kerstr->FoutJ , kerstr->FoutEN , kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] , kerstr->ovlp[0] , kerstr->numvec[0] , kerstr->num_ele[0] , kerstr->grid_cache_len[0] , kerstr->cu_cache_length[0] , kerstr->Mass[0] , kerstr->Charge[0] , kerstr->SPEC[0] , kerstr->NUM_SPEC[0] , kerstr->DELTA_X[0] , kerstr->DELTA_Y[0] , kerstr->DELTA_Z[0] , kerstr->Deltat[0] , yid , scmc_internal_g_ylen );
            }
        }
    }
    return 0;
}
/*
 * Parameter binders for openmp_split_pass_z: each stores the device buffer
 * pointer pm->d_data into the matching field of the kernel-argument struct.
 * Fix: the generated functions were declared int but fell off the end
 * without a return statement — using such a return value is undefined
 * behavior in C99+ — so each binder now explicitly returns 0 (success).
 */
int openmp_split_pass_z_scmc_set_parameter_inoutput (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->inoutput = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_xyzw (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->xyzw = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_cu_cache (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_cu_xyzw (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_fieldE (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldE = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_fieldB (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldB = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_FoutJ (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->FoutJ = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_FoutEN (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->FoutEN = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_XLEN (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->XLEN = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_YLEN (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->YLEN = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_ZLEN (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ZLEN = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_ovlp (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ovlp = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_numvec (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->numvec = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_num_ele (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->num_ele = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_grid_cache_len (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_cu_cache_length (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_Mass (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Mass = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_Charge (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Charge = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_SPEC (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->SPEC = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_NUM_SPEC (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->NUM_SPEC = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_DELTA_X (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_DELTA_Y (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_DELTA_Z (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_split_pass_z_scmc_set_parameter_Deltat (openmp_split_pass_z_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Deltat = pm->d_data; return 0; }
/* --- openmp_split_pass_y_vlo_sg2_nopush_small_grids: backend query helpers --- */

/* No per-kernel setup is needed by this OpenMP backend; always succeeds. */
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_init (openmp_pscmc_env * pe ,openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr )
{
    (void) pe;
    (void) kerstr;
    return 0;
}

/* Report the size of the kernel-argument struct through *len. */
void openmp_split_pass_y_vlo_sg2_nopush_small_grids_get_struct_len (size_t * len )
{
    *len = sizeof(openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct);
}

/* One "compute unit" per OpenMP thread available to the runtime. */
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_get_num_compute_units (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr )
{
    (void) kerstr;
    return omp_get_max_threads();
}

/* Preferred x-dimension work size for this kernel (small-grid variant: 1). */
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_get_xlen ()
{
    return 1;
}
/*
 * Run the split_pass_y_vlo_sg2_nopush_small_grids kernel over an
 * xlen x ylen index space.  Rows (yid) are dealt round-robin across the
 * OpenMP threads; each thread then sweeps every xid column of its rows,
 * passing the bound buffers (note: LFoutJ in this variant) and scalar
 * parameters from kerstr to the kernel.  Always returns 0.
 *
 * Fix: the generated ysingle/ymin/ymax locals were computed per thread but
 * never used (the loop is round-robin, not block-partitioned); the dead
 * code is removed.
 */
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_exec (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        int xid;
        int yid;
        /* Thread t processes rows t, t+numt, t+2*numt, ... */
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
        {
            for (xid = 0; xid < scmc_internal_g_xlen; xid += 1)
            {
                openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_kernel ( kerstr->inoutput , kerstr->xyzw , kerstr->cu_cache , kerstr->cu_xyzw , kerstr->fieldE , kerstr->fieldB , kerstr->LFoutJ , kerstr->FoutEN , kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] , kerstr->ovlp[0] , kerstr->numvec[0] , kerstr->num_ele[0] , kerstr->grid_cache_len[0] , kerstr->cu_cache_length[0] , kerstr->Mass[0] , kerstr->Charge[0] , kerstr->SPEC[0] , kerstr->NUM_SPEC[0] , kerstr->DELTA_X[0] , kerstr->DELTA_Y[0] , kerstr->DELTA_Z[0] , kerstr->Deltat[0] , yid , scmc_internal_g_ylen );
            }
        }
    }
    return 0;
}
/*
 * Parameter binders for openmp_split_pass_y_vlo_sg2_nopush_small_grids:
 * each stores the device buffer pointer pm->d_data into the matching field
 * of the kernel-argument struct (this variant binds LFoutJ, not FoutJ).
 * Fix: the generated functions were declared int but fell off the end
 * without a return statement — using such a return value is undefined
 * behavior in C99+ — so each binder now explicitly returns 0 (success).
 */
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_inoutput (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->inoutput = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_xyzw (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->xyzw = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_cu_cache (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_cu_xyzw (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_fieldE (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldE = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_fieldB (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldB = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_LFoutJ (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->LFoutJ = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_FoutEN (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->FoutEN = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_XLEN (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->XLEN = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_YLEN (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->YLEN = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_ZLEN (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ZLEN = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_ovlp (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ovlp = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_numvec (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->numvec = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_num_ele (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->num_ele = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_grid_cache_len (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_cu_cache_length (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_Mass (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Mass = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_Charge (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Charge = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_SPEC (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->SPEC = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_NUM_SPEC (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->NUM_SPEC = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_DELTA_X (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_DELTA_Y (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_DELTA_Z (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_sg2_nopush_small_grids_scmc_set_parameter_Deltat (openmp_split_pass_y_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Deltat = pm->d_data; return 0; }
/* --- openmp_split_pass_y_vlo_sg2_small_grids: backend query helpers --- */

/* No per-kernel setup is needed by this OpenMP backend; always succeeds. */
int openmp_split_pass_y_vlo_sg2_small_grids_init (openmp_pscmc_env * pe ,openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr )
{
    (void) pe;
    (void) kerstr;
    return 0;
}

/* Report the size of the kernel-argument struct through *len. */
void openmp_split_pass_y_vlo_sg2_small_grids_get_struct_len (size_t * len )
{
    *len = sizeof(openmp_split_pass_y_vlo_sg2_small_grids_struct);
}

/* One "compute unit" per OpenMP thread available to the runtime. */
int openmp_split_pass_y_vlo_sg2_small_grids_get_num_compute_units (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr )
{
    (void) kerstr;
    return omp_get_max_threads();
}

/* Preferred x-dimension work size for this kernel (small-grid variant: 1). */
int openmp_split_pass_y_vlo_sg2_small_grids_get_xlen ()
{
    return 1;
}
/*
 * Run the split_pass_y_vlo_sg2_small_grids kernel over an xlen x ylen
 * index space.  Rows (yid) are dealt round-robin across the OpenMP
 * threads; each thread then sweeps every xid column of its rows, passing
 * the bound buffers (note: LFoutJ in this variant) and scalar parameters
 * from kerstr to the kernel.  Always returns 0.
 *
 * Fix: the generated ysingle/ymin/ymax locals were computed per thread but
 * never used (the loop is round-robin, not block-partitioned); the dead
 * code is removed.
 */
int openmp_split_pass_y_vlo_sg2_small_grids_exec (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        int xid;
        int yid;
        /* Thread t processes rows t, t+numt, t+2*numt, ... */
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
        {
            for (xid = 0; xid < scmc_internal_g_xlen; xid += 1)
            {
                openmp_split_pass_y_vlo_sg2_small_grids_scmc_kernel ( kerstr->inoutput , kerstr->xyzw , kerstr->cu_cache , kerstr->cu_xyzw , kerstr->fieldE , kerstr->fieldB , kerstr->LFoutJ , kerstr->FoutEN , kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] , kerstr->ovlp[0] , kerstr->numvec[0] , kerstr->num_ele[0] , kerstr->grid_cache_len[0] , kerstr->cu_cache_length[0] , kerstr->Mass[0] , kerstr->Charge[0] , kerstr->SPEC[0] , kerstr->NUM_SPEC[0] , kerstr->DELTA_X[0] , kerstr->DELTA_Y[0] , kerstr->DELTA_Z[0] , kerstr->Deltat[0] , yid , scmc_internal_g_ylen );
            }
        }
    }
    return 0;
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_inoutput (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->inoutput = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_xyzw (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->xyzw = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_cu_cache (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cu_cache = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_cu_xyzw (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cu_xyzw = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_fieldE (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->fieldE = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_fieldB (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->fieldB = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_LFoutJ (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->LFoutJ = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_FoutEN (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->FoutEN = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_XLEN (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->XLEN = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_YLEN (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->YLEN = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_ZLEN (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->ZLEN = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_ovlp (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->ovlp = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_numvec (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->numvec = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_num_ele (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->num_ele = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_grid_cache_len (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->grid_cache_len = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_cu_cache_length (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->cu_cache_length = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_Mass (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Mass = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_Charge (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Charge = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_SPEC (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->SPEC = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_NUM_SPEC (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->NUM_SPEC = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_DELTA_X (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DELTA_X = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_DELTA_Y (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DELTA_Y = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_DELTA_Z (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DELTA_Z = pm->d_data);
}
int openmp_split_pass_y_vlo_sg2_small_grids_scmc_set_parameter_Deltat (openmp_split_pass_y_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Deltat = pm->d_data);
}
/* --- openmp_split_pass_y_vlo_small_grids: backend query helpers --- */

/* No per-kernel setup is needed by this OpenMP backend; always succeeds. */
int openmp_split_pass_y_vlo_small_grids_init (openmp_pscmc_env * pe ,openmp_split_pass_y_vlo_small_grids_struct * kerstr )
{
    (void) pe;
    (void) kerstr;
    return 0;
}

/* Report the size of the kernel-argument struct through *len. */
void openmp_split_pass_y_vlo_small_grids_get_struct_len (size_t * len )
{
    *len = sizeof(openmp_split_pass_y_vlo_small_grids_struct);
}

/* One "compute unit" per OpenMP thread available to the runtime. */
int openmp_split_pass_y_vlo_small_grids_get_num_compute_units (openmp_split_pass_y_vlo_small_grids_struct * kerstr )
{
    (void) kerstr;
    return omp_get_max_threads();
}

/* Preferred x-dimension work size for this kernel (small-grid variant: 1). */
int openmp_split_pass_y_vlo_small_grids_get_xlen ()
{
    return 1;
}
/*
 * Run the split_pass_y_vlo_small_grids kernel over an xlen x ylen index
 * space.  Rows (yid) are dealt round-robin across the OpenMP threads;
 * each thread then sweeps every xid column of its rows, passing the bound
 * buffers (note: LFoutJ in this variant) and scalar parameters from
 * kerstr to the kernel.  Always returns 0.
 *
 * Fix: the generated ysingle/ymin/ymax locals were computed per thread but
 * never used (the loop is round-robin, not block-partitioned); the dead
 * code is removed.
 */
int openmp_split_pass_y_vlo_small_grids_exec (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        int xid;
        int yid;
        /* Thread t processes rows t, t+numt, t+2*numt, ... */
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
        {
            for (xid = 0; xid < scmc_internal_g_xlen; xid += 1)
            {
                openmp_split_pass_y_vlo_small_grids_scmc_kernel ( kerstr->inoutput , kerstr->xyzw , kerstr->cu_cache , kerstr->cu_xyzw , kerstr->fieldE , kerstr->fieldB , kerstr->LFoutJ , kerstr->FoutEN , kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] , kerstr->ovlp[0] , kerstr->numvec[0] , kerstr->num_ele[0] , kerstr->grid_cache_len[0] , kerstr->cu_cache_length[0] , kerstr->Mass[0] , kerstr->Charge[0] , kerstr->SPEC[0] , kerstr->NUM_SPEC[0] , kerstr->DELTA_X[0] , kerstr->DELTA_Y[0] , kerstr->DELTA_Z[0] , kerstr->Deltat[0] , yid , scmc_internal_g_ylen );
            }
        }
    }
    return 0;
}
/*
 * Parameter binders for openmp_split_pass_y_vlo_small_grids: each stores
 * the device buffer pointer pm->d_data into the matching field of the
 * kernel-argument struct (this variant binds LFoutJ, not FoutJ).
 * Fix: the generated functions were declared int but fell off the end
 * without a return statement — using such a return value is undefined
 * behavior in C99+ — so each binder now explicitly returns 0 (success).
 */
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_inoutput (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->inoutput = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_xyzw (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->xyzw = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_cu_cache (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_cu_xyzw (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_fieldE (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldE = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_fieldB (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldB = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_LFoutJ (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->LFoutJ = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_FoutEN (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->FoutEN = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_XLEN (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->XLEN = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_YLEN (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->YLEN = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_ZLEN (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ZLEN = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_ovlp (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ovlp = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_numvec (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->numvec = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_num_ele (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->num_ele = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_grid_cache_len (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_cu_cache_length (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_Mass (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Mass = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_Charge (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Charge = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_SPEC (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->SPEC = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_NUM_SPEC (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->NUM_SPEC = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_DELTA_X (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_DELTA_Y (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_DELTA_Z (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_split_pass_y_vlo_small_grids_scmc_set_parameter_Deltat (openmp_split_pass_y_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Deltat = pm->d_data; return 0; }
/* Initializer for the sg2_small_grids kernel; nothing to set up.
 * pe and kerstr are accepted for interface uniformity only. */
int openmp_split_pass_y_sg2_small_grids_init (openmp_pscmc_env * pe ,openmp_split_pass_y_sg2_small_grids_struct * kerstr ){
    (void) pe;
    (void) kerstr;
    return 0;
}
/* Reports the size of the kernel argument struct via len[0]. */
void openmp_split_pass_y_sg2_small_grids_get_struct_len (size_t * len ){
    len[0] = sizeof(openmp_split_pass_y_sg2_small_grids_struct);
}
/* Number of compute units for this backend = OpenMP max thread count. */
int openmp_split_pass_y_sg2_small_grids_get_num_compute_units (openmp_split_pass_y_sg2_small_grids_struct * kerstr ){
    (void) kerstr;
    return omp_get_max_threads();
}
/* x-dimension width of this kernel (scalar variant: 1). */
int openmp_split_pass_y_sg2_small_grids_get_xlen (){
    return 1;
}
/* Runs the kernel over the xlen x ylen global index space.  Each OpenMP
 * thread takes y rows round-robin (yid = tid, tid+numt, ...) and sweeps
 * every x index for each of its rows.  Always returns 0.
 * Fixed: dropped the dead ysingle/ymin/ymax locals the generator emitted
 * for an unused block-partition scheme. */
int openmp_split_pass_y_sg2_small_grids_exec (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        int yid;
        int xid;
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (xid = 0; xid < scmc_internal_g_xlen; xid += 1) {
                openmp_split_pass_y_sg2_small_grids_scmc_kernel(
                    kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
                    kerstr->fieldE, kerstr->fieldB, kerstr->LFoutJ, kerstr->FoutEN,
                    kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
                    kerstr->numvec[0], kerstr->num_ele[0], kerstr->grid_cache_len[0],
                    kerstr->cu_cache_length[0], kerstr->Mass[0], kerstr->Charge[0],
                    kerstr->SPEC[0], kerstr->NUM_SPEC[0], kerstr->DELTA_X[0],
                    kerstr->DELTA_Y[0], kerstr->DELTA_Z[0], kerstr->Deltat[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/* Parameter setters for the split_pass_y_sg2_small_grids kernel: each one
 * binds the device buffer (pm->d_data) of a pscmc memory object to the
 * matching field of the kernel argument struct.
 * Fixed: the generated originals were declared int but had no return
 * statement (UB if the caller reads the result); they now return 0. */
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_inoutput (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inoutput = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_xyzw (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xyzw = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_cu_cache (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_cu_xyzw (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_xyzw = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_fieldE (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldE = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_fieldB (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_LFoutJ (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->LFoutJ = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_FoutEN (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->FoutEN = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_XLEN (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_YLEN (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_ZLEN (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_ovlp (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_numvec (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_num_ele (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_grid_cache_len (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->grid_cache_len = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_cu_cache_length (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache_length = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_Mass (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Mass = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_Charge (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Charge = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_SPEC (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->SPEC = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_NUM_SPEC (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->NUM_SPEC = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_DELTA_X (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_X = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_DELTA_Y (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Y = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_DELTA_Z (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Z = pm->d_data;  return 0;
}
int openmp_split_pass_y_sg2_small_grids_scmc_set_parameter_Deltat (openmp_split_pass_y_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Deltat = pm->d_data;  return 0;
}
/* Initializer for the small_grids kernel; nothing to set up.
 * pe and kerstr are accepted for interface uniformity only. */
int openmp_split_pass_y_small_grids_init (openmp_pscmc_env * pe ,openmp_split_pass_y_small_grids_struct * kerstr ){
    (void) pe;
    (void) kerstr;
    return 0;
}
/* Reports the size of the kernel argument struct via len[0]. */
void openmp_split_pass_y_small_grids_get_struct_len (size_t * len ){
    len[0] = sizeof(openmp_split_pass_y_small_grids_struct);
}
/* Number of compute units for this backend = OpenMP max thread count. */
int openmp_split_pass_y_small_grids_get_num_compute_units (openmp_split_pass_y_small_grids_struct * kerstr ){
    (void) kerstr;
    return omp_get_max_threads();
}
/* x-dimension width of this kernel (scalar variant: 1). */
int openmp_split_pass_y_small_grids_get_xlen (){
    return 1;
}
/* Runs the kernel over the xlen x ylen global index space.  Each OpenMP
 * thread takes y rows round-robin (yid = tid, tid+numt, ...) and sweeps
 * every x index for each of its rows.  Always returns 0.
 * Fixed: dropped the dead ysingle/ymin/ymax locals the generator emitted. */
int openmp_split_pass_y_small_grids_exec (openmp_split_pass_y_small_grids_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        int yid;
        int xid;
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (xid = 0; xid < scmc_internal_g_xlen; xid += 1) {
                openmp_split_pass_y_small_grids_scmc_kernel(
                    kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
                    kerstr->fieldE, kerstr->fieldB, kerstr->LFoutJ, kerstr->FoutEN,
                    kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
                    kerstr->numvec[0], kerstr->num_ele[0], kerstr->grid_cache_len[0],
                    kerstr->cu_cache_length[0], kerstr->Mass[0], kerstr->Charge[0],
                    kerstr->SPEC[0], kerstr->NUM_SPEC[0], kerstr->DELTA_X[0],
                    kerstr->DELTA_Y[0], kerstr->DELTA_Z[0], kerstr->Deltat[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/* Parameter setters for the split_pass_y_small_grids kernel: each one
 * binds the device buffer (pm->d_data) of a pscmc memory object to the
 * matching field of the kernel argument struct.
 * Fixed: the generated originals were declared int but had no return
 * statement (UB if the caller reads the result); they now return 0. */
int openmp_split_pass_y_small_grids_scmc_set_parameter_inoutput (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inoutput = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_xyzw (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xyzw = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_cu_cache (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_cu_xyzw (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_xyzw = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_fieldE (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldE = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_fieldB (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_LFoutJ (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->LFoutJ = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_FoutEN (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->FoutEN = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_XLEN (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_YLEN (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_ZLEN (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_ovlp (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_numvec (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_num_ele (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_grid_cache_len (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->grid_cache_len = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_cu_cache_length (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache_length = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_Mass (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Mass = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_Charge (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Charge = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_SPEC (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->SPEC = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_NUM_SPEC (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->NUM_SPEC = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_DELTA_X (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_X = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_DELTA_Y (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Y = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_DELTA_Z (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Z = pm->d_data;  return 0;
}
int openmp_split_pass_y_small_grids_scmc_set_parameter_Deltat (openmp_split_pass_y_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Deltat = pm->d_data;  return 0;
}
/* Initializer for the vlo_nopush kernel; nothing to set up.
 * pe and kerstr are accepted for interface uniformity only. */
int openmp_split_pass_y_vlo_nopush_init (openmp_pscmc_env * pe ,openmp_split_pass_y_vlo_nopush_struct * kerstr ){
    (void) pe;
    (void) kerstr;
    return 0;
}
/* Reports the size of the kernel argument struct via len[0]. */
void openmp_split_pass_y_vlo_nopush_get_struct_len (size_t * len ){
    len[0] = sizeof(openmp_split_pass_y_vlo_nopush_struct);
}
/* Number of compute units for this backend = OpenMP max thread count. */
int openmp_split_pass_y_vlo_nopush_get_num_compute_units (openmp_split_pass_y_vlo_nopush_struct * kerstr ){
    (void) kerstr;
    return omp_get_max_threads();
}
/* x-dimension width of this kernel (vectorized variant: IDX_OPT_MAX). */
int openmp_split_pass_y_vlo_nopush_get_xlen (){
    return IDX_OPT_MAX;
}
/* Runs the kernel over the xlen x ylen global index space.  Each OpenMP
 * thread takes y rows round-robin (yid = tid, tid+numt, ...) and sweeps
 * every x index for each of its rows.  Always returns 0.
 * Fixed: dropped the dead ysingle/ymin/ymax locals the generator emitted. */
int openmp_split_pass_y_vlo_nopush_exec (openmp_split_pass_y_vlo_nopush_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        int yid;
        int xid;
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (xid = 0; xid < scmc_internal_g_xlen; xid += 1) {
                openmp_split_pass_y_vlo_nopush_scmc_kernel(
                    kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
                    kerstr->fieldE, kerstr->fieldB, kerstr->FoutJ, kerstr->FoutEN,
                    kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
                    kerstr->numvec[0], kerstr->num_ele[0], kerstr->grid_cache_len[0],
                    kerstr->cu_cache_length[0], kerstr->Mass[0], kerstr->Charge[0],
                    kerstr->SPEC[0], kerstr->NUM_SPEC[0], kerstr->DELTA_X[0],
                    kerstr->DELTA_Y[0], kerstr->DELTA_Z[0], kerstr->Deltat[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/* Parameter setters for the split_pass_y_vlo_nopush kernel: each one
 * binds the device buffer (pm->d_data) of a pscmc memory object to the
 * matching field of the kernel argument struct.
 * Fixed: the generated originals were declared int but had no return
 * statement (UB if the caller reads the result); they now return 0. */
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_inoutput (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inoutput = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_xyzw (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xyzw = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_cu_cache (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_cu_xyzw (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_xyzw = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_fieldE (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldE = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_fieldB (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_FoutJ (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->FoutJ = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_FoutEN (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->FoutEN = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_XLEN (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_YLEN (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_ZLEN (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_ovlp (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_numvec (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_num_ele (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_grid_cache_len (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->grid_cache_len = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_cu_cache_length (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache_length = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_Mass (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Mass = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_Charge (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Charge = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_SPEC (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->SPEC = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_NUM_SPEC (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->NUM_SPEC = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_DELTA_X (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_X = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_DELTA_Y (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Y = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_DELTA_Z (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Z = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_nopush_scmc_set_parameter_Deltat (openmp_split_pass_y_vlo_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Deltat = pm->d_data;  return 0;
}
/* Initializer for the vlo kernel; nothing to set up.
 * pe and kerstr are accepted for interface uniformity only. */
int openmp_split_pass_y_vlo_init (openmp_pscmc_env * pe ,openmp_split_pass_y_vlo_struct * kerstr ){
    (void) pe;
    (void) kerstr;
    return 0;
}
/* Reports the size of the kernel argument struct via len[0]. */
void openmp_split_pass_y_vlo_get_struct_len (size_t * len ){
    len[0] = sizeof(openmp_split_pass_y_vlo_struct);
}
/* Number of compute units for this backend = OpenMP max thread count. */
int openmp_split_pass_y_vlo_get_num_compute_units (openmp_split_pass_y_vlo_struct * kerstr ){
    (void) kerstr;
    return omp_get_max_threads();
}
/* x-dimension width of this kernel (vectorized variant: IDX_OPT_MAX). */
int openmp_split_pass_y_vlo_get_xlen (){
    return IDX_OPT_MAX;
}
/* Runs the kernel over the xlen x ylen global index space.  Each OpenMP
 * thread takes y rows round-robin (yid = tid, tid+numt, ...) and sweeps
 * every x index for each of its rows.  Always returns 0.
 * Fixed: dropped the dead ysingle/ymin/ymax locals the generator emitted. */
int openmp_split_pass_y_vlo_exec (openmp_split_pass_y_vlo_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        int yid;
        int xid;
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (xid = 0; xid < scmc_internal_g_xlen; xid += 1) {
                openmp_split_pass_y_vlo_scmc_kernel(
                    kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
                    kerstr->fieldE, kerstr->fieldB, kerstr->FoutJ, kerstr->FoutEN,
                    kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
                    kerstr->numvec[0], kerstr->num_ele[0], kerstr->grid_cache_len[0],
                    kerstr->cu_cache_length[0], kerstr->Mass[0], kerstr->Charge[0],
                    kerstr->SPEC[0], kerstr->NUM_SPEC[0], kerstr->DELTA_X[0],
                    kerstr->DELTA_Y[0], kerstr->DELTA_Z[0], kerstr->Deltat[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/* Parameter setters for the split_pass_y_vlo kernel: each one binds the
 * device buffer (pm->d_data) of a pscmc memory object to the matching
 * field of the kernel argument struct.
 * Fixed: the generated originals were declared int but had no return
 * statement (UB if the caller reads the result); they now return 0. */
int openmp_split_pass_y_vlo_scmc_set_parameter_inoutput (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inoutput = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_xyzw (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xyzw = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_cu_cache (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_cu_xyzw (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_xyzw = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_fieldE (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldE = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_fieldB (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_FoutJ (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->FoutJ = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_FoutEN (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->FoutEN = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_XLEN (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_YLEN (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_ZLEN (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_ovlp (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_numvec (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_num_ele (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_grid_cache_len (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->grid_cache_len = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_cu_cache_length (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache_length = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_Mass (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Mass = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_Charge (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Charge = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_SPEC (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->SPEC = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_NUM_SPEC (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->NUM_SPEC = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_DELTA_X (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_X = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_DELTA_Y (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Y = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_DELTA_Z (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Z = pm->d_data;  return 0;
}
int openmp_split_pass_y_vlo_scmc_set_parameter_Deltat (openmp_split_pass_y_vlo_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Deltat = pm->d_data;  return 0;
}
/* Initializer for the nopush kernel; nothing to set up.
 * pe and kerstr are accepted for interface uniformity only. */
int openmp_split_pass_y_nopush_init (openmp_pscmc_env * pe ,openmp_split_pass_y_nopush_struct * kerstr ){
    (void) pe;
    (void) kerstr;
    return 0;
}
/* Reports the size of the kernel argument struct via len[0]. */
void openmp_split_pass_y_nopush_get_struct_len (size_t * len ){
    len[0] = sizeof(openmp_split_pass_y_nopush_struct);
}
/* Number of compute units for this backend = OpenMP max thread count. */
int openmp_split_pass_y_nopush_get_num_compute_units (openmp_split_pass_y_nopush_struct * kerstr ){
    (void) kerstr;
    return omp_get_max_threads();
}
/* x-dimension width of this kernel (vectorized variant: IDX_OPT_MAX). */
int openmp_split_pass_y_nopush_get_xlen (){
    return IDX_OPT_MAX;
}
/* Runs the kernel over the xlen x ylen global index space.  Each OpenMP
 * thread takes y rows round-robin (yid = tid, tid+numt, ...) and sweeps
 * every x index for each of its rows.  Always returns 0.
 * Fixed: dropped the dead ysingle/ymin/ymax locals the generator emitted. */
int openmp_split_pass_y_nopush_exec (openmp_split_pass_y_nopush_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        int yid;
        int xid;
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (xid = 0; xid < scmc_internal_g_xlen; xid += 1) {
                openmp_split_pass_y_nopush_scmc_kernel(
                    kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
                    kerstr->fieldE, kerstr->fieldB, kerstr->FoutJ, kerstr->FoutEN,
                    kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
                    kerstr->numvec[0], kerstr->num_ele[0], kerstr->grid_cache_len[0],
                    kerstr->cu_cache_length[0], kerstr->Mass[0], kerstr->Charge[0],
                    kerstr->SPEC[0], kerstr->NUM_SPEC[0], kerstr->DELTA_X[0],
                    kerstr->DELTA_Y[0], kerstr->DELTA_Z[0], kerstr->Deltat[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/* Parameter setters for the split_pass_y_nopush kernel: each one binds
 * the device buffer (pm->d_data) of a pscmc memory object to the
 * matching field of the kernel argument struct.
 * Fixed: the generated originals were declared int but had no return
 * statement (UB if the caller reads the result); they now return 0. */
int openmp_split_pass_y_nopush_scmc_set_parameter_inoutput (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inoutput = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_xyzw (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xyzw = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_cu_cache (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_cu_xyzw (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_xyzw = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_fieldE (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldE = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_fieldB (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_FoutJ (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->FoutJ = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_FoutEN (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->FoutEN = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_XLEN (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_YLEN (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_ZLEN (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_ovlp (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_numvec (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_num_ele (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_grid_cache_len (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->grid_cache_len = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_cu_cache_length (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache_length = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_Mass (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Mass = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_Charge (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Charge = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_SPEC (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->SPEC = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_NUM_SPEC (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->NUM_SPEC = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_DELTA_X (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_X = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_DELTA_Y (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Y = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_DELTA_Z (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Z = pm->d_data;  return 0;
}
int openmp_split_pass_y_nopush_scmc_set_parameter_Deltat (openmp_split_pass_y_nopush_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Deltat = pm->d_data;  return 0;
}
/* Initializer for the split_pass_y kernel; nothing to set up.
 * pe and kerstr are accepted for interface uniformity only. */
int openmp_split_pass_y_init (openmp_pscmc_env * pe ,openmp_split_pass_y_struct * kerstr ){
    (void) pe;
    (void) kerstr;
    return 0;
}
/* Reports the size of the kernel argument struct via len[0]. */
void openmp_split_pass_y_get_struct_len (size_t * len ){
    len[0] = sizeof(openmp_split_pass_y_struct);
}
/* Number of compute units for this backend = OpenMP max thread count. */
int openmp_split_pass_y_get_num_compute_units (openmp_split_pass_y_struct * kerstr ){
    (void) kerstr;
    return omp_get_max_threads();
}
/* x-dimension width of this kernel (vectorized variant: IDX_OPT_MAX). */
int openmp_split_pass_y_get_xlen (){
    return IDX_OPT_MAX;
}
/* Runs the kernel over the xlen x ylen global index space.  Each OpenMP
 * thread takes y rows round-robin (yid = tid, tid+numt, ...) and sweeps
 * every x index for each of its rows.  Always returns 0.
 * Fixed: dropped the dead ysingle/ymin/ymax locals the generator emitted. */
int openmp_split_pass_y_exec (openmp_split_pass_y_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
    {
        int numt = omp_get_num_threads();
        int tid = omp_get_thread_num();
        int yid;
        int xid;
        for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
            for (xid = 0; xid < scmc_internal_g_xlen; xid += 1) {
                openmp_split_pass_y_scmc_kernel(
                    kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
                    kerstr->fieldE, kerstr->fieldB, kerstr->FoutJ, kerstr->FoutEN,
                    kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
                    kerstr->numvec[0], kerstr->num_ele[0], kerstr->grid_cache_len[0],
                    kerstr->cu_cache_length[0], kerstr->Mass[0], kerstr->Charge[0],
                    kerstr->SPEC[0], kerstr->NUM_SPEC[0], kerstr->DELTA_X[0],
                    kerstr->DELTA_Y[0], kerstr->DELTA_Z[0], kerstr->Deltat[0],
                    yid, scmc_internal_g_ylen);
            }
        }
    }
    return 0;
}
/* Parameter setters for openmp_split_pass_y: bind the backend buffer
 * (pm->d_data) to the named field of the kernel-argument struct.  Return 0
 * on success.  The originals were declared int but fell off the end without
 * a return — undefined behavior if the caller inspects the result. */
int openmp_split_pass_y_scmc_set_parameter_inoutput (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->inoutput = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_xyzw (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->xyzw = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_cu_cache (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_cu_xyzw (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_fieldE (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldE = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_fieldB (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldB = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_FoutJ (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->FoutJ = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_FoutEN (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->FoutEN = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_XLEN (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->XLEN = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_YLEN (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->YLEN = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_ZLEN (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ZLEN = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_ovlp (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ovlp = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_numvec (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->numvec = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_num_ele (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->num_ele = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_grid_cache_len (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_cu_cache_length (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_Mass (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Mass = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_Charge (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Charge = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_SPEC (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->SPEC = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_NUM_SPEC (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->NUM_SPEC = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_DELTA_X (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_DELTA_Y (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_DELTA_Z (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_split_pass_y_scmc_set_parameter_Deltat (openmp_split_pass_y_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Deltat = pm->d_data; return 0; }
/* Bookkeeping helpers for the x_vlo_sg2_nopush_small_grids kernel. */

/* The OpenMP backend needs no per-kernel setup; succeed trivially. */
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_init (openmp_pscmc_env * pe ,openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ){
    (void) pe;
    (void) kerstr;
    return 0;
}
/* Report the size of the kernel-argument struct through *len. */
void openmp_split_pass_x_vlo_sg2_nopush_small_grids_get_struct_len (size_t * len ){
    *len = sizeof(openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct);
}
/* One compute unit per available OpenMP thread. */
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_get_num_compute_units (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ){
    (void) kerstr;
    return omp_get_max_threads();
}
/* Logical x-extent of this kernel: a single column. */
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_get_xlen (){
    const int xlen = 1;
    return xlen;
}
/* Run the kernel over the whole (x, y) index space.  Rows (y) are dealt out
 * round-robin across the OpenMP team; each thread walks every x column for
 * its rows.  Always returns 0.
 * Removed: ysingle/ymin/ymax were computed but never read (dead code).
 * NOTE(review): xid/yid are int while the extents are long — assumes the
 * extents fit in int; confirm upstream. */
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_exec (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    int xid;
    int yid;
    /* Round-robin: thread t handles rows t, t+numt, t+2*numt, ... */
    for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
    {
        for (xid = 0; xid < scmc_internal_g_xlen; xid++)
        {
            openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_kernel(
                kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
                kerstr->fieldE, kerstr->fieldB, kerstr->LFoutJ, kerstr->FoutEN,
                kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
                kerstr->numvec[0], kerstr->num_ele[0], kerstr->grid_cache_len[0],
                kerstr->cu_cache_length[0], kerstr->Mass[0], kerstr->Charge[0],
                kerstr->SPEC[0], kerstr->NUM_SPEC[0], kerstr->DELTA_X[0],
                kerstr->DELTA_Y[0], kerstr->DELTA_Z[0], kerstr->Deltat[0],
                yid, scmc_internal_g_ylen);
        }
    }
}
return 0;
}
/* Parameter setters for x_vlo_sg2_nopush_small_grids: bind the backend
 * buffer (pm->d_data) to the named field of the kernel-argument struct.
 * Return 0 on success.  The originals were declared int but fell off the
 * end without a return — UB if the caller inspects the result. */
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_inoutput (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->inoutput = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_xyzw (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->xyzw = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_cu_cache (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_cu_xyzw (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_fieldE (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldE = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_fieldB (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldB = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_LFoutJ (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->LFoutJ = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_FoutEN (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->FoutEN = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_XLEN (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->XLEN = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_YLEN (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->YLEN = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_ZLEN (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ZLEN = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_ovlp (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ovlp = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_numvec (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->numvec = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_num_ele (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->num_ele = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_grid_cache_len (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_cu_cache_length (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_Mass (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Mass = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_Charge (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Charge = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_SPEC (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->SPEC = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_NUM_SPEC (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->NUM_SPEC = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_DELTA_X (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_DELTA_Y (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_DELTA_Z (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_nopush_small_grids_scmc_set_parameter_Deltat (openmp_split_pass_x_vlo_sg2_nopush_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Deltat = pm->d_data; return 0; }
/* Bookkeeping helpers for the x_vlo_sg2_small_grids kernel. */

/* The OpenMP backend needs no per-kernel setup; succeed trivially. */
int openmp_split_pass_x_vlo_sg2_small_grids_init (openmp_pscmc_env * pe ,openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ){
    (void) pe;
    (void) kerstr;
    return 0;
}
/* Report the size of the kernel-argument struct through *len. */
void openmp_split_pass_x_vlo_sg2_small_grids_get_struct_len (size_t * len ){
    *len = sizeof(openmp_split_pass_x_vlo_sg2_small_grids_struct);
}
/* One compute unit per available OpenMP thread. */
int openmp_split_pass_x_vlo_sg2_small_grids_get_num_compute_units (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ){
    (void) kerstr;
    return omp_get_max_threads();
}
/* Logical x-extent of this kernel: a single column. */
int openmp_split_pass_x_vlo_sg2_small_grids_get_xlen (){
    const int xlen = 1;
    return xlen;
}
/* Run the kernel over the whole (x, y) index space.  Rows (y) are dealt out
 * round-robin across the OpenMP team; each thread walks every x column for
 * its rows.  Always returns 0.
 * Removed: ysingle/ymin/ymax were computed but never read (dead code).
 * NOTE(review): xid/yid are int while the extents are long — assumes the
 * extents fit in int; confirm upstream. */
int openmp_split_pass_x_vlo_sg2_small_grids_exec (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    int xid;
    int yid;
    /* Round-robin: thread t handles rows t, t+numt, t+2*numt, ... */
    for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
    {
        for (xid = 0; xid < scmc_internal_g_xlen; xid++)
        {
            openmp_split_pass_x_vlo_sg2_small_grids_scmc_kernel(
                kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
                kerstr->fieldE, kerstr->fieldB, kerstr->LFoutJ, kerstr->FoutEN,
                kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
                kerstr->numvec[0], kerstr->num_ele[0], kerstr->grid_cache_len[0],
                kerstr->cu_cache_length[0], kerstr->Mass[0], kerstr->Charge[0],
                kerstr->SPEC[0], kerstr->NUM_SPEC[0], kerstr->DELTA_X[0],
                kerstr->DELTA_Y[0], kerstr->DELTA_Z[0], kerstr->Deltat[0],
                yid, scmc_internal_g_ylen);
        }
    }
}
return 0;
}
/* Parameter setters for x_vlo_sg2_small_grids: bind the backend buffer
 * (pm->d_data) to the named field of the kernel-argument struct.  Return 0
 * on success.  The originals were declared int but fell off the end without
 * a return — UB if the caller inspects the result. */
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_inoutput (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->inoutput = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_xyzw (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->xyzw = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_cu_cache (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_cu_xyzw (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_fieldE (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldE = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_fieldB (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldB = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_LFoutJ (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->LFoutJ = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_FoutEN (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->FoutEN = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_XLEN (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->XLEN = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_YLEN (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->YLEN = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_ZLEN (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ZLEN = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_ovlp (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ovlp = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_numvec (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->numvec = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_num_ele (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->num_ele = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_grid_cache_len (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_cu_cache_length (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_Mass (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Mass = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_Charge (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Charge = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_SPEC (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->SPEC = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_NUM_SPEC (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->NUM_SPEC = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_DELTA_X (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_DELTA_Y (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_DELTA_Z (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_sg2_small_grids_scmc_set_parameter_Deltat (openmp_split_pass_x_vlo_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Deltat = pm->d_data; return 0; }
/* Bookkeeping helpers for the x_vlo_small_grids kernel. */

/* The OpenMP backend needs no per-kernel setup; succeed trivially. */
int openmp_split_pass_x_vlo_small_grids_init (openmp_pscmc_env * pe ,openmp_split_pass_x_vlo_small_grids_struct * kerstr ){
    (void) pe;
    (void) kerstr;
    return 0;
}
/* Report the size of the kernel-argument struct through *len. */
void openmp_split_pass_x_vlo_small_grids_get_struct_len (size_t * len ){
    *len = sizeof(openmp_split_pass_x_vlo_small_grids_struct);
}
/* One compute unit per available OpenMP thread. */
int openmp_split_pass_x_vlo_small_grids_get_num_compute_units (openmp_split_pass_x_vlo_small_grids_struct * kerstr ){
    (void) kerstr;
    return omp_get_max_threads();
}
/* Logical x-extent of this kernel: a single column. */
int openmp_split_pass_x_vlo_small_grids_get_xlen (){
    const int xlen = 1;
    return xlen;
}
/* Run the kernel over the whole (x, y) index space.  Rows (y) are dealt out
 * round-robin across the OpenMP team; each thread walks every x column for
 * its rows.  Always returns 0.
 * Removed: ysingle/ymin/ymax were computed but never read (dead code).
 * NOTE(review): xid/yid are int while the extents are long — assumes the
 * extents fit in int; confirm upstream. */
int openmp_split_pass_x_vlo_small_grids_exec (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    int xid;
    int yid;
    /* Round-robin: thread t handles rows t, t+numt, t+2*numt, ... */
    for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
    {
        for (xid = 0; xid < scmc_internal_g_xlen; xid++)
        {
            openmp_split_pass_x_vlo_small_grids_scmc_kernel(
                kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
                kerstr->fieldE, kerstr->fieldB, kerstr->LFoutJ, kerstr->FoutEN,
                kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
                kerstr->numvec[0], kerstr->num_ele[0], kerstr->grid_cache_len[0],
                kerstr->cu_cache_length[0], kerstr->Mass[0], kerstr->Charge[0],
                kerstr->SPEC[0], kerstr->NUM_SPEC[0], kerstr->DELTA_X[0],
                kerstr->DELTA_Y[0], kerstr->DELTA_Z[0], kerstr->Deltat[0],
                yid, scmc_internal_g_ylen);
        }
    }
}
return 0;
}
/* Parameter setters for x_vlo_small_grids: bind the backend buffer
 * (pm->d_data) to the named field of the kernel-argument struct.  Return 0
 * on success.  The originals were declared int but fell off the end without
 * a return — UB if the caller inspects the result. */
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_inoutput (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->inoutput = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_xyzw (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->xyzw = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_cu_cache (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_cu_xyzw (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_fieldE (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldE = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_fieldB (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldB = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_LFoutJ (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->LFoutJ = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_FoutEN (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->FoutEN = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_XLEN (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->XLEN = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_YLEN (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->YLEN = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_ZLEN (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ZLEN = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_ovlp (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ovlp = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_numvec (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->numvec = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_num_ele (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->num_ele = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_grid_cache_len (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_cu_cache_length (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_Mass (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Mass = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_Charge (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Charge = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_SPEC (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->SPEC = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_NUM_SPEC (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->NUM_SPEC = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_DELTA_X (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_DELTA_Y (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_DELTA_Z (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_small_grids_scmc_set_parameter_Deltat (openmp_split_pass_x_vlo_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Deltat = pm->d_data; return 0; }
/* Bookkeeping helpers for the x_sg2_small_grids kernel. */

/* The OpenMP backend needs no per-kernel setup; succeed trivially. */
int openmp_split_pass_x_sg2_small_grids_init (openmp_pscmc_env * pe ,openmp_split_pass_x_sg2_small_grids_struct * kerstr ){
    (void) pe;
    (void) kerstr;
    return 0;
}
/* Report the size of the kernel-argument struct through *len. */
void openmp_split_pass_x_sg2_small_grids_get_struct_len (size_t * len ){
    *len = sizeof(openmp_split_pass_x_sg2_small_grids_struct);
}
/* One compute unit per available OpenMP thread. */
int openmp_split_pass_x_sg2_small_grids_get_num_compute_units (openmp_split_pass_x_sg2_small_grids_struct * kerstr ){
    (void) kerstr;
    return omp_get_max_threads();
}
/* Logical x-extent of this kernel: a single column. */
int openmp_split_pass_x_sg2_small_grids_get_xlen (){
    const int xlen = 1;
    return xlen;
}
/* Run the kernel over the whole (x, y) index space.  Rows (y) are dealt out
 * round-robin across the OpenMP team; each thread walks every x column for
 * its rows.  Always returns 0.
 * Removed: ysingle/ymin/ymax were computed but never read (dead code).
 * NOTE(review): xid/yid are int while the extents are long — assumes the
 * extents fit in int; confirm upstream. */
int openmp_split_pass_x_sg2_small_grids_exec (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    int xid;
    int yid;
    /* Round-robin: thread t handles rows t, t+numt, t+2*numt, ... */
    for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
    {
        for (xid = 0; xid < scmc_internal_g_xlen; xid++)
        {
            openmp_split_pass_x_sg2_small_grids_scmc_kernel(
                kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
                kerstr->fieldE, kerstr->fieldB, kerstr->LFoutJ, kerstr->FoutEN,
                kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
                kerstr->numvec[0], kerstr->num_ele[0], kerstr->grid_cache_len[0],
                kerstr->cu_cache_length[0], kerstr->Mass[0], kerstr->Charge[0],
                kerstr->SPEC[0], kerstr->NUM_SPEC[0], kerstr->DELTA_X[0],
                kerstr->DELTA_Y[0], kerstr->DELTA_Z[0], kerstr->Deltat[0],
                yid, scmc_internal_g_ylen);
        }
    }
}
return 0;
}
/* Parameter setters for x_sg2_small_grids: bind the backend buffer
 * (pm->d_data) to the named field of the kernel-argument struct.  Return 0
 * on success.  The originals were declared int but fell off the end without
 * a return — UB if the caller inspects the result. */
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_inoutput (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->inoutput = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_xyzw (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->xyzw = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_cu_cache (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_cu_xyzw (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_fieldE (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldE = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_fieldB (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldB = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_LFoutJ (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->LFoutJ = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_FoutEN (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->FoutEN = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_XLEN (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->XLEN = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_YLEN (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->YLEN = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_ZLEN (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ZLEN = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_ovlp (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ovlp = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_numvec (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->numvec = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_num_ele (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->num_ele = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_grid_cache_len (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_cu_cache_length (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_Mass (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Mass = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_Charge (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Charge = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_SPEC (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->SPEC = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_NUM_SPEC (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->NUM_SPEC = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_DELTA_X (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_DELTA_Y (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_DELTA_Z (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_split_pass_x_sg2_small_grids_scmc_set_parameter_Deltat (openmp_split_pass_x_sg2_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Deltat = pm->d_data; return 0; }
/* Bookkeeping helpers for the x_small_grids kernel. */

/* The OpenMP backend needs no per-kernel setup; succeed trivially. */
int openmp_split_pass_x_small_grids_init (openmp_pscmc_env * pe ,openmp_split_pass_x_small_grids_struct * kerstr ){
    (void) pe;
    (void) kerstr;
    return 0;
}
/* Report the size of the kernel-argument struct through *len. */
void openmp_split_pass_x_small_grids_get_struct_len (size_t * len ){
    *len = sizeof(openmp_split_pass_x_small_grids_struct);
}
/* One compute unit per available OpenMP thread. */
int openmp_split_pass_x_small_grids_get_num_compute_units (openmp_split_pass_x_small_grids_struct * kerstr ){
    (void) kerstr;
    return omp_get_max_threads();
}
/* Logical x-extent of this kernel: a single column. */
int openmp_split_pass_x_small_grids_get_xlen (){
    const int xlen = 1;
    return xlen;
}
/* Run the kernel over the whole (x, y) index space.  Rows (y) are dealt out
 * round-robin across the OpenMP team; each thread walks every x column for
 * its rows.  Always returns 0.
 * Removed: ysingle/ymin/ymax were computed but never read (dead code).
 * NOTE(review): xid/yid are int while the extents are long — assumes the
 * extents fit in int; confirm upstream. */
int openmp_split_pass_x_small_grids_exec (openmp_split_pass_x_small_grids_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    int xid;
    int yid;
    /* Round-robin: thread t handles rows t, t+numt, t+2*numt, ... */
    for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
    {
        for (xid = 0; xid < scmc_internal_g_xlen; xid++)
        {
            openmp_split_pass_x_small_grids_scmc_kernel(
                kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
                kerstr->fieldE, kerstr->fieldB, kerstr->LFoutJ, kerstr->FoutEN,
                kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
                kerstr->numvec[0], kerstr->num_ele[0], kerstr->grid_cache_len[0],
                kerstr->cu_cache_length[0], kerstr->Mass[0], kerstr->Charge[0],
                kerstr->SPEC[0], kerstr->NUM_SPEC[0], kerstr->DELTA_X[0],
                kerstr->DELTA_Y[0], kerstr->DELTA_Z[0], kerstr->Deltat[0],
                yid, scmc_internal_g_ylen);
        }
    }
}
return 0;
}
/* Parameter setters for x_small_grids: bind the backend buffer (pm->d_data)
 * to the named field of the kernel-argument struct.  Return 0 on success.
 * The originals were declared int but fell off the end without a return —
 * UB if the caller inspects the result. */
int openmp_split_pass_x_small_grids_scmc_set_parameter_inoutput (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->inoutput = pm->d_data; return 0; }
int openmp_split_pass_x_small_grids_scmc_set_parameter_xyzw (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->xyzw = pm->d_data; return 0; }
int openmp_split_pass_x_small_grids_scmc_set_parameter_cu_cache (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache = pm->d_data; return 0; }
int openmp_split_pass_x_small_grids_scmc_set_parameter_cu_xyzw (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_split_pass_x_small_grids_scmc_set_parameter_fieldE (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldE = pm->d_data; return 0; }
int openmp_split_pass_x_small_grids_scmc_set_parameter_fieldB (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->fieldB = pm->d_data; return 0; }
int openmp_split_pass_x_small_grids_scmc_set_parameter_LFoutJ (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->LFoutJ = pm->d_data; return 0; }
int openmp_split_pass_x_small_grids_scmc_set_parameter_FoutEN (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->FoutEN = pm->d_data; return 0; }
int openmp_split_pass_x_small_grids_scmc_set_parameter_XLEN (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->XLEN = pm->d_data; return 0; }
int openmp_split_pass_x_small_grids_scmc_set_parameter_YLEN (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->YLEN = pm->d_data; return 0; }
int openmp_split_pass_x_small_grids_scmc_set_parameter_ZLEN (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ZLEN = pm->d_data; return 0; }
int openmp_split_pass_x_small_grids_scmc_set_parameter_ovlp (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->ovlp = pm->d_data; return 0; }
int openmp_split_pass_x_small_grids_scmc_set_parameter_numvec (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->numvec = pm->d_data; return 0; }
int openmp_split_pass_x_small_grids_scmc_set_parameter_num_ele (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->num_ele = pm->d_data; return 0; }
int openmp_split_pass_x_small_grids_scmc_set_parameter_grid_cache_len (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_split_pass_x_small_grids_scmc_set_parameter_cu_cache_length (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_split_pass_x_small_grids_scmc_set_parameter_Mass (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){ kerstr->Mass = pm->d_data; return 0; }
int openmp_split_pass_x_small_grids_scmc_set_parameter_Charge (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Charge = pm->d_data);
}
int openmp_split_pass_x_small_grids_scmc_set_parameter_SPEC (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->SPEC = pm->d_data);
}
int openmp_split_pass_x_small_grids_scmc_set_parameter_NUM_SPEC (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->NUM_SPEC = pm->d_data);
}
int openmp_split_pass_x_small_grids_scmc_set_parameter_DELTA_X (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DELTA_X = pm->d_data);
}
int openmp_split_pass_x_small_grids_scmc_set_parameter_DELTA_Y (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DELTA_Y = pm->d_data);
}
int openmp_split_pass_x_small_grids_scmc_set_parameter_DELTA_Z (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DELTA_Z = pm->d_data);
}
int openmp_split_pass_x_small_grids_scmc_set_parameter_Deltat (openmp_split_pass_x_small_grids_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Deltat = pm->d_data);
}
/* No per-kernel initialization is needed for the OpenMP backend. */
int openmp_split_pass_x_vlo_nopush_init (openmp_pscmc_env * pe ,openmp_split_pass_x_vlo_nopush_struct * kerstr ){
return 0 ;}
/* Writes the size of the kernel-argument struct into *len. */
void openmp_split_pass_x_vlo_nopush_get_struct_len (size_t * len ){
((len)[0] = sizeof(openmp_split_pass_x_vlo_nopush_struct ));
}
/* Number of compute units for this backend == OpenMP's max thread count. */
int openmp_split_pass_x_vlo_nopush_get_num_compute_units (openmp_split_pass_x_vlo_nopush_struct * kerstr ){
return omp_get_max_threads ( ) ;}
/* Returns IDX_OPT_MAX (defined elsewhere; presumably the preferred
 * x-dimension tile width for this backend -- confirm against the header). */
int openmp_split_pass_x_vlo_nopush_get_xlen (){
return IDX_OPT_MAX ;}
/* Executes the split_pass_x_vlo_nopush kernel over the whole
 * scmc_internal_g_ylen x scmc_internal_g_xlen index space.
 * Rows (yid) are dealt round-robin to the OpenMP threads
 * (yid = tid, tid+numt, ...); each thread then sweeps every xid column of
 * its rows.  Always returns 0.
 * Cleanup: the generated block partition (ysingle/ymin/ymax) was computed
 * but never used and has been removed; loop counters are now `long` so the
 * comparison against the `long` extents cannot overflow. */
int openmp_split_pass_x_vlo_nopush_exec (openmp_split_pass_x_vlo_nopush_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
long numt = omp_get_num_threads ( ) ;
long tid = omp_get_thread_num ( ) ;
long xid ;
long yid ;
for (yid = tid ; yid < scmc_internal_g_ylen ; yid += numt)
{
for (xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1)
{
openmp_split_pass_x_vlo_nopush_scmc_kernel ( kerstr->inoutput , kerstr->xyzw , kerstr->cu_cache , kerstr->cu_xyzw , kerstr->fieldE , kerstr->fieldB , kerstr->FoutJ , kerstr->FoutEN , kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] , kerstr->ovlp[0] , kerstr->numvec[0] , kerstr->num_ele[0] , kerstr->grid_cache_len[0] , kerstr->cu_cache_length[0] , kerstr->Mass[0] , kerstr->Charge[0] , kerstr->SPEC[0] , kerstr->NUM_SPEC[0] , kerstr->DELTA_X[0] , kerstr->DELTA_Y[0] , kerstr->DELTA_Z[0] , kerstr->Deltat[0] , yid , scmc_internal_g_ylen );
}
}
}
return 0 ;}
/* Parameter-binding helpers for the split_pass_x_vlo_nopush kernel: each one
 * stores the device-data pointer of an openmp_pscmc_mem object into the
 * matching field of the kernel-argument struct.
 * Fix: every helper is declared `int` but originally fell off the end without
 * returning a value (undefined behavior when the caller reads the result,
 * C11 6.9.1p12); they now return 0 like the sibling *_init/*_exec helpers. */
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_inoutput (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->inoutput = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_xyzw (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->xyzw = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_cu_cache (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->cu_cache = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_cu_xyzw (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_fieldE (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->fieldE = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_fieldB (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->fieldB = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_FoutJ (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->FoutJ = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_FoutEN (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->FoutEN = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_XLEN (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->XLEN = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_YLEN (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->YLEN = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_ZLEN (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->ZLEN = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_ovlp (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->ovlp = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_numvec (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->numvec = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_num_ele (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->num_ele = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_grid_cache_len (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_cu_cache_length (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_Mass (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->Mass = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_Charge (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->Charge = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_SPEC (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->SPEC = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_NUM_SPEC (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->NUM_SPEC = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_DELTA_X (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_DELTA_Y (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_DELTA_Z (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_nopush_scmc_set_parameter_Deltat (openmp_split_pass_x_vlo_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->Deltat = pm->d_data; return 0; }
/* No per-kernel initialization is needed for the OpenMP backend. */
int openmp_split_pass_x_vlo_init (openmp_pscmc_env * pe ,openmp_split_pass_x_vlo_struct * kerstr ){
return 0 ;}
/* Writes the size of the kernel-argument struct into *len. */
void openmp_split_pass_x_vlo_get_struct_len (size_t * len ){
((len)[0] = sizeof(openmp_split_pass_x_vlo_struct ));
}
/* Number of compute units for this backend == OpenMP's max thread count. */
int openmp_split_pass_x_vlo_get_num_compute_units (openmp_split_pass_x_vlo_struct * kerstr ){
return omp_get_max_threads ( ) ;}
/* Returns IDX_OPT_MAX (defined elsewhere; presumably the preferred
 * x-dimension tile width for this backend -- confirm against the header). */
int openmp_split_pass_x_vlo_get_xlen (){
return IDX_OPT_MAX ;}
/* Executes the split_pass_x_vlo kernel over the whole
 * scmc_internal_g_ylen x scmc_internal_g_xlen index space.
 * Rows (yid) are dealt round-robin to the OpenMP threads
 * (yid = tid, tid+numt, ...); each thread then sweeps every xid column of
 * its rows.  Always returns 0.
 * Cleanup: the generated block partition (ysingle/ymin/ymax) was computed
 * but never used and has been removed; loop counters are now `long` so the
 * comparison against the `long` extents cannot overflow. */
int openmp_split_pass_x_vlo_exec (openmp_split_pass_x_vlo_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
long numt = omp_get_num_threads ( ) ;
long tid = omp_get_thread_num ( ) ;
long xid ;
long yid ;
for (yid = tid ; yid < scmc_internal_g_ylen ; yid += numt)
{
for (xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1)
{
openmp_split_pass_x_vlo_scmc_kernel ( kerstr->inoutput , kerstr->xyzw , kerstr->cu_cache , kerstr->cu_xyzw , kerstr->fieldE , kerstr->fieldB , kerstr->FoutJ , kerstr->FoutEN , kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] , kerstr->ovlp[0] , kerstr->numvec[0] , kerstr->num_ele[0] , kerstr->grid_cache_len[0] , kerstr->cu_cache_length[0] , kerstr->Mass[0] , kerstr->Charge[0] , kerstr->SPEC[0] , kerstr->NUM_SPEC[0] , kerstr->DELTA_X[0] , kerstr->DELTA_Y[0] , kerstr->DELTA_Z[0] , kerstr->Deltat[0] , yid , scmc_internal_g_ylen );
}
}
}
return 0 ;}
/* Parameter-binding helpers for the split_pass_x_vlo kernel: each one stores
 * the device-data pointer of an openmp_pscmc_mem object into the matching
 * field of the kernel-argument struct.
 * Fix: every helper is declared `int` but originally fell off the end without
 * returning a value (undefined behavior when the caller reads the result,
 * C11 6.9.1p12); they now return 0 like the sibling *_init/*_exec helpers. */
int openmp_split_pass_x_vlo_scmc_set_parameter_inoutput (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->inoutput = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_xyzw (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->xyzw = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_cu_cache (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->cu_cache = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_cu_xyzw (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_fieldE (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->fieldE = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_fieldB (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->fieldB = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_FoutJ (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->FoutJ = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_FoutEN (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->FoutEN = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_XLEN (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->XLEN = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_YLEN (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->YLEN = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_ZLEN (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->ZLEN = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_ovlp (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->ovlp = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_numvec (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->numvec = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_num_ele (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->num_ele = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_grid_cache_len (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_cu_cache_length (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_Mass (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->Mass = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_Charge (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->Charge = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_SPEC (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->SPEC = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_NUM_SPEC (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->NUM_SPEC = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_DELTA_X (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_DELTA_Y (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_DELTA_Z (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_split_pass_x_vlo_scmc_set_parameter_Deltat (openmp_split_pass_x_vlo_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->Deltat = pm->d_data; return 0; }
/* No per-kernel initialization is needed for the OpenMP backend. */
int openmp_split_pass_x_nopush_init (openmp_pscmc_env * pe ,openmp_split_pass_x_nopush_struct * kerstr ){
return 0 ;}
/* Writes the size of the kernel-argument struct into *len. */
void openmp_split_pass_x_nopush_get_struct_len (size_t * len ){
((len)[0] = sizeof(openmp_split_pass_x_nopush_struct ));
}
/* Number of compute units for this backend == OpenMP's max thread count. */
int openmp_split_pass_x_nopush_get_num_compute_units (openmp_split_pass_x_nopush_struct * kerstr ){
return omp_get_max_threads ( ) ;}
/* Returns IDX_OPT_MAX (defined elsewhere; presumably the preferred
 * x-dimension tile width for this backend -- confirm against the header). */
int openmp_split_pass_x_nopush_get_xlen (){
return IDX_OPT_MAX ;}
/* Executes the split_pass_x_nopush kernel over the whole
 * scmc_internal_g_ylen x scmc_internal_g_xlen index space.
 * Rows (yid) are dealt round-robin to the OpenMP threads
 * (yid = tid, tid+numt, ...); each thread then sweeps every xid column of
 * its rows.  Always returns 0.
 * Cleanup: the generated block partition (ysingle/ymin/ymax) was computed
 * but never used and has been removed; loop counters are now `long` so the
 * comparison against the `long` extents cannot overflow. */
int openmp_split_pass_x_nopush_exec (openmp_split_pass_x_nopush_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
long numt = omp_get_num_threads ( ) ;
long tid = omp_get_thread_num ( ) ;
long xid ;
long yid ;
for (yid = tid ; yid < scmc_internal_g_ylen ; yid += numt)
{
for (xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1)
{
openmp_split_pass_x_nopush_scmc_kernel ( kerstr->inoutput , kerstr->xyzw , kerstr->cu_cache , kerstr->cu_xyzw , kerstr->fieldE , kerstr->fieldB , kerstr->FoutJ , kerstr->FoutEN , kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] , kerstr->ovlp[0] , kerstr->numvec[0] , kerstr->num_ele[0] , kerstr->grid_cache_len[0] , kerstr->cu_cache_length[0] , kerstr->Mass[0] , kerstr->Charge[0] , kerstr->SPEC[0] , kerstr->NUM_SPEC[0] , kerstr->DELTA_X[0] , kerstr->DELTA_Y[0] , kerstr->DELTA_Z[0] , kerstr->Deltat[0] , yid , scmc_internal_g_ylen );
}
}
}
return 0 ;}
/* Parameter-binding helpers for the split_pass_x_nopush kernel: each one
 * stores the device-data pointer of an openmp_pscmc_mem object into the
 * matching field of the kernel-argument struct.
 * Fix: every helper is declared `int` but originally fell off the end without
 * returning a value (undefined behavior when the caller reads the result,
 * C11 6.9.1p12); they now return 0 like the sibling *_init/*_exec helpers. */
int openmp_split_pass_x_nopush_scmc_set_parameter_inoutput (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->inoutput = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_xyzw (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->xyzw = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_cu_cache (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->cu_cache = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_cu_xyzw (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_fieldE (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->fieldE = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_fieldB (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->fieldB = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_FoutJ (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->FoutJ = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_FoutEN (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->FoutEN = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_XLEN (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->XLEN = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_YLEN (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->YLEN = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_ZLEN (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->ZLEN = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_ovlp (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->ovlp = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_numvec (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->numvec = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_num_ele (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->num_ele = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_grid_cache_len (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_cu_cache_length (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_Mass (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->Mass = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_Charge (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->Charge = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_SPEC (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->SPEC = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_NUM_SPEC (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->NUM_SPEC = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_DELTA_X (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_DELTA_Y (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_DELTA_Z (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_split_pass_x_nopush_scmc_set_parameter_Deltat (openmp_split_pass_x_nopush_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->Deltat = pm->d_data; return 0; }
/* No per-kernel initialization is needed for the OpenMP backend. */
int openmp_split_pass_x_init (openmp_pscmc_env * pe ,openmp_split_pass_x_struct * kerstr ){
return 0 ;}
/* Writes the size of the kernel-argument struct into *len. */
void openmp_split_pass_x_get_struct_len (size_t * len ){
((len)[0] = sizeof(openmp_split_pass_x_struct ));
}
/* Number of compute units for this backend == OpenMP's max thread count. */
int openmp_split_pass_x_get_num_compute_units (openmp_split_pass_x_struct * kerstr ){
return omp_get_max_threads ( ) ;}
/* Returns IDX_OPT_MAX (defined elsewhere; presumably the preferred
 * x-dimension tile width for this backend -- confirm against the header). */
int openmp_split_pass_x_get_xlen (){
return IDX_OPT_MAX ;}
/* Executes the split_pass_x kernel over the whole
 * scmc_internal_g_ylen x scmc_internal_g_xlen index space.
 * Rows (yid) are dealt round-robin to the OpenMP threads
 * (yid = tid, tid+numt, ...); each thread then sweeps every xid column of
 * its rows.  Always returns 0.
 * Cleanup: the generated block partition (ysingle/ymin/ymax) was computed
 * but never used and has been removed; loop counters are now `long` so the
 * comparison against the `long` extents cannot overflow. */
int openmp_split_pass_x_exec (openmp_split_pass_x_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
long numt = omp_get_num_threads ( ) ;
long tid = omp_get_thread_num ( ) ;
long xid ;
long yid ;
for (yid = tid ; yid < scmc_internal_g_ylen ; yid += numt)
{
for (xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1)
{
openmp_split_pass_x_scmc_kernel ( kerstr->inoutput , kerstr->xyzw , kerstr->cu_cache , kerstr->cu_xyzw , kerstr->fieldE , kerstr->fieldB , kerstr->FoutJ , kerstr->FoutEN , kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] , kerstr->ovlp[0] , kerstr->numvec[0] , kerstr->num_ele[0] , kerstr->grid_cache_len[0] , kerstr->cu_cache_length[0] , kerstr->Mass[0] , kerstr->Charge[0] , kerstr->SPEC[0] , kerstr->NUM_SPEC[0] , kerstr->DELTA_X[0] , kerstr->DELTA_Y[0] , kerstr->DELTA_Z[0] , kerstr->Deltat[0] , yid , scmc_internal_g_ylen );
}
}
}
return 0 ;}
/* Parameter-binding helpers for the split_pass_x kernel: each one stores the
 * device-data pointer of an openmp_pscmc_mem object into the matching field
 * of the kernel-argument struct.
 * Fix: every helper is declared `int` but originally fell off the end without
 * returning a value (undefined behavior when the caller reads the result,
 * C11 6.9.1p12); they now return 0 like the sibling *_init/*_exec helpers. */
int openmp_split_pass_x_scmc_set_parameter_inoutput (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->inoutput = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_xyzw (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->xyzw = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_cu_cache (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->cu_cache = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_cu_xyzw (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_fieldE (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->fieldE = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_fieldB (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->fieldB = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_FoutJ (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->FoutJ = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_FoutEN (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->FoutEN = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_XLEN (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->XLEN = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_YLEN (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->YLEN = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_ZLEN (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->ZLEN = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_ovlp (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->ovlp = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_numvec (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->numvec = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_num_ele (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->num_ele = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_grid_cache_len (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_cu_cache_length (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_Mass (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->Mass = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_Charge (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->Charge = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_SPEC (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->SPEC = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_NUM_SPEC (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->NUM_SPEC = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_DELTA_X (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_DELTA_Y (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_DELTA_Z (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_split_pass_x_scmc_set_parameter_Deltat (openmp_split_pass_x_struct * kerstr , openmp_pscmc_mem * pm ){ kerstr->Deltat = pm->d_data; return 0; }
|
parallel.h | #pragma once
namespace pbbs {
//***************************************
// All the pbbs library uses only four functions for
// accessing parallelism.
// These can be implemented on top of any scheduler.
// The implementation is chosen below by the CILK / OPENMP / HOMEGROWN
// preprocessor symbols; a serial fallback is used otherwise.
//***************************************
// number of threads available from OS
// template <>
int num_workers();
// id of running thread, should be numbered from [0...num-workers)
int worker_id();
// requests a worker-pool size of n (backend support varies)
void set_num_workers(int n);
#ifdef SAGE
static int numanode();
#endif
// the granularity of a simple loop (e.g. adding one to each element
// of an array) to reasonably hide cost of scheduler
// #define PAR_GRANULARITY 2000
// parallel loop from start (inclusive) to end (exclusive) running
// function f.
// f should map long to void.
// granularity is the number of iterations to run sequentially
// if 0 (default) then the scheduler will decide
// conservative uses a safer scheduler
template <typename F>
static void parallel_for(long start, long end, F f, long granularity = 0,
                         bool conservative = false);
// runs the thunks left and right in parallel.
// both left and right should map void to void
// conservative uses a safer scheduler
template <typename Lf, typename Rf>
static void par_do(Lf left, Rf right, bool conservative = false);
// parallel_for that additionally hands each worker an allocator object
// (created via init_alloc, finalized via finish_alloc) alongside the index.
template <typename A, typename Af, typename Df, typename F>
static void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
                               long end, F f, long granularity = 0,
                               bool conservative = false);
} // namespace pbbs
//***************************************
// cilkplus
#if defined(CILK)
#include <cilk/cilk.h>
#include <cilk/cilk_api.h>
#include <cilk/reducer.h>
#include <iostream>
#include <sstream>
#define PAR_GRANULARITY 2000
namespace pbbs {
// Parallel loop over [start, end) for the Cilk Plus backend.
// granularity == 0: hand the whole range to cilk_for and let the runtime
// decide; otherwise ranges of at most `granularity` iterations run
// sequentially and larger ranges are split recursively with cilk_spawn.
// Fix: the recursive calls previously dropped the `conservative` argument,
// silently resetting it to the declaration default on every split; it is
// now propagated unchanged.
template <typename F>
inline void parallel_for(long start, long end, F f, long granularity,
                         bool conservative) {
  if (granularity == 0)
    cilk_for(long i = start; i < end; i++) f(i);
  else if ((end - start) <= granularity)
    for (long i = start; i < end; i++) f(i);
  else {
    long n = end - start;
    // Split slightly off-center (9/16 of the range) -- pbbs heuristic.
    long mid = (start + (9 * (n + 1)) / 16);
    cilk_spawn parallel_for(start, mid, f, granularity, conservative);
    parallel_for(mid, end, f, granularity, conservative);
    cilk_sync;
  }
}
// Grainsize-1 variant: every iteration becomes its own cilk_for chunk.
// (granularity/conservative are accepted for interface parity and ignored.)
template <typename F>
inline void parallel_for_1(long start, long end, F f, long granularity,
                           bool conservative) {
  _Pragma("cilk grainsize = 1") cilk_for(long i = start; i < end; i++) f(i);
}
// Fork-join pair: right() runs as the spawned child while the calling
// worker runs left(); cilk_sync joins both before returning.
template <typename Lf, typename Rf>
inline void par_do(Lf left, Rf right, bool conservative) {
  cilk_spawn right();
  left();
  cilk_sync;
}
// Wraps an allocator object A in a Cilk reducer so each worker strand gets
// its own view; the reduce step is a deliberate no-op (views never merge).
template <typename A>
class alloc_holder {
  struct Monoid : cilk::monoid_base<A> {
    static void reduce(A *left, A *right) {}
  };

 public:
  cilk::reducer<Monoid> imp_;

  alloc_holder() : imp_() {}
};
// TODO try parallel_for_1
// parallel_for that passes each iteration a per-worker allocator view.
// NOTE(review): init_alloc runs once per ITERATION on the shared per-worker
// view, and the finish_alloc call is commented out -- confirm both are the
// intended allocator lifecycle.
template <typename A, typename Af, typename Df, typename F>
inline void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
                               long end, F f, long granularity,
                               bool conservative) {
  alloc_holder<A> alloc;
  parallel_for_1(start, end,
                 [&](size_t i) {
                   init_alloc(&alloc.imp_.view());
                   f(i, &(alloc.imp_.view()));
                   // finish_alloc(&(alloc.imp_.view()));
                 },
                 granularity, conservative);
}
// Worker introspection, forwarded to the Cilk Plus runtime.
inline int num_workers() { return __cilkrts_get_nworkers(); }
inline int worker_id() { return __cilkrts_get_worker_number(); }
#ifdef SAGE
// NUMA queries are only implemented by the homegrown scheduler.
inline int numanode() {
  std::cout << "numanode() only supported with homegrown scheduler" << std::endl;
  exit(-1);
  return 1;  // unreachable; satisfies the int return type
}
#endif
} // namespace pbbs
// openmp
#elif defined(OPENMP)
#include <omp.h>
#include <stddef.h>
#define PAR_GRANULARITY 200000
namespace pbbs {
extern bool in_par_do;
// OpenMP backend parallel loop over [start, end).
// Fix: the declared `granularity` contract ("number of iterations to run
// sequentially") was ignored; a range no larger than a nonzero granularity
// now runs serially, matching the CILK backend.  `conservative` has no
// OpenMP equivalent and remains unused.
template <class F>
inline void parallel_for(long start, long end, F f, long granularity,
                         bool conservative) {
  (void)conservative;
  if (granularity != 0 && (end - start) <= granularity) {
    for (long i = start; i < end; i++) f(i);
  } else {
    _Pragma("omp parallel for") for (long i = start; i < end; i++) f(i);
  }
}
// Grainsize-1 dynamic loop.  NOTE: this uses a bare `omp for`, so it must be
// called from inside an enclosing `omp parallel` region (as
// parallel_for_alloc below does); outside one it simply runs sequentially.
// granularity/conservative are accepted for interface parity and ignored.
template <typename F>
inline void parallel_for_1(long start, long end, F f, long granularity,
                           bool conservative) {
#pragma omp for schedule(dynamic, 1) nowait
  for (long i = start; i < end; i++) f(i);
}
// Runs left() and right() as OpenMP tasks and waits for both to finish.
// The file-scope flag in_par_do distinguishes the outermost call (which must
// open the parallel region and its single task producer) from nested calls
// that are already inside a task region.
// Fix: without braces the original `omp parallel`/`omp single` applied only
// to the first task, so right() and the taskwait fell OUTSIDE the parallel
// region (right ran serially after it); the nested branch also returned
// without waiting for its tasks.  Both branches now spawn the two tasks
// inside the proper region and join them with taskwait before returning.
template <typename Lf, typename Rf>
inline void par_do(Lf left, Rf right, bool conservative) {
  (void)conservative;  // no OpenMP equivalent
  if (!in_par_do) {
    in_par_do = true;  // at top level start up tasking
#pragma omp parallel
#pragma omp single
    {
#pragma omp task
      left();
#pragma omp task
      right();
#pragma omp taskwait
    }
    in_par_do = false;
  } else {  // already inside a parallel region: just spawn and join tasks
#pragma omp task
    left();
#pragma omp task
    right();
#pragma omp taskwait
  }
}
// Executes the job on the calling thread.  The num_threads hint is part of
// the common backend interface but has no effect in this backend.
template <typename Job>
inline void parallel_run(Job job, int num_threads = 0) {
  (void)num_threads;
  job();
}
// parallel_for where every OpenMP thread gets its own allocator object:
// each thread news up an A, runs init_alloc on it, executes its share of the
// iterations via parallel_for_1 (whose bare `omp for` binds to the parallel
// region opened here), then runs finish_alloc on its instance.
// NOTE(review): the A instances are never deleted -- unless finish_alloc
// takes ownership this leaks one A per thread per call; confirm.
template <typename A, typename Af, typename Df, typename F>
inline void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
                               long end, F f, long granularity,
                               bool conservative) {
  A* alloc = nullptr;
#pragma omp parallel private(alloc)
  {
    alloc = new A();
    init_alloc(alloc);
    parallel_for_1(start, end, [&](size_t i) { f(i, alloc); }, granularity,
                   conservative);
    //#pragma omp for schedule(dynamic, 1) nowait
    // for(long i=start; i<end; i++) f(i, alloc);
    finish_alloc(alloc);
  }
}
// Worker introspection, forwarded to the OpenMP runtime.
inline int num_workers() { return omp_get_max_threads(); }
inline int worker_id() { return omp_get_thread_num(); }
#ifdef SAGE
// NUMA queries are only implemented by the homegrown scheduler.
// NOTE(review): this branch uses std::cout/exit but the OPENMP section only
// includes <omp.h> and <stddef.h> -- <iostream>/<cstdlib> may be missing
// when SAGE is defined; confirm.
inline int numanode() {
  std::cout << "numanode() only supported with homegrown scheduler" << std::endl;
  exit(-1);
  return 1;
}
#endif
} // namespace pbbs
// Guy's scheduler (ABP)
#elif defined(HOMEGROWN)
#include "scheduler.h"
#define PAR_GRANULARITY 512
namespace pbbs {
// Homegrown (work-stealing) backend: every primitive forwards to the
// process-wide pbbs::global_scheduler instance declared in scheduler.h.
template <class F>
inline void parallel_for(long start, long end, F f, long granularity,
                         bool conservative) {
  pbbs::global_scheduler.parfor(start, end, f, granularity, conservative);
}
template <typename Lf, typename Rf>
inline void par_do(Lf left, Rf right, bool conservative) {
  return pbbs::global_scheduler.pardo(left, right, conservative);
}
// Runs the job on the calling thread; num_threads is accepted for interface
// parity and ignored by this backend.
template <typename Job>
inline void parallel_run(Job job, int num_threads = 0) {
  job();
}
// parallel_for with a lazily-created, thread-local allocator object passed
// to every iteration.
// NOTE(review): the thread_local A is reused across calls and never deleted,
// init_alloc re-runs on it every iteration, and finish_alloc is commented
// out -- confirm this is the intended allocator lifecycle.
template <typename A, typename Af, typename Df, typename F>
inline void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
                               long end, F f, long granularity,
                               bool conservative) {
  parallel_for(start, end,
               [&](long i) {
                 static thread_local A* alloc = new A();
                 init_alloc(alloc);
                 f(i, alloc);
               },
               granularity, conservative);
  // finish_alloc(alloc);
}
inline int num_workers() { return pbbs::global_scheduler.num_workers(); }
inline int worker_id() { return pbbs::global_scheduler.worker_id(); }
#ifdef SAGE
inline int numanode() { return pbbs::global_scheduler.numanode(); }
#endif
}  // namespace pbbs
// c++
#else
#define PAR_GRANULARITY 1000
namespace pbbs {
// Serial fallback: visit [start, end) in order on the calling thread.
// granularity and conservative are scheduler hints with no meaning here.
template <class F>
inline void parallel_for(long start, long end, F f, long granularity,
                         bool conservative) {
  (void)granularity;
  (void)conservative;
  long idx = start;
  while (idx < end) {
    f(idx);
    ++idx;
  }
}
// Serial fallback for par_do: the "fork-join" degenerates to running the
// left closure, then the right one, on the calling thread.
template <typename Lf, typename Rf>
inline void par_do(Lf left, Rf right, bool conservative) {
  (void)conservative;  // scheduling hint; meaningless without a scheduler
  left();
  right();
}
// Serial fallback: run the job synchronously on this thread; the requested
// thread count is ignored (there is no worker pool to size).
template <typename Job>
inline void parallel_run(Job job, int num_threads = 0) {
  (void)num_threads;
  job();
}
// Serial fallback for parallel_for_alloc: a single allocator instance is
// created, initialized once, shared by every iteration of the loop, and
// finally handed to finish_alloc. (As in the original, the A object itself
// is not deleted here; ownership passes to finish_alloc's policy.)
template <typename A, typename Af, typename Df, typename F>
inline void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
                               long end, F f, long granularity,
                               bool conservative) {
  (void)granularity;
  (void)conservative;
  A* shared_alloc = new A();
  init_alloc(shared_alloc);
  for (long idx = start; idx < end; ++idx) f(idx, shared_alloc);
  finish_alloc(shared_alloc);
}
// Serial build: there is exactly one worker, and it is always id 0.
inline int num_workers() { return 1; }
inline int worker_id() { return 0; }
} // namespace pbbs
#endif
|
SuperRayGenerator.h | /*
* Copyright(c) 2016, Youngsun Kwon, Donghyuk Kim, and Sung-eui Yoon, KAIST
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met :
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and / or other materials provided with the distribution.
* * Neither the name of SuperRay nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRIDMAP2D_SUPERRAY_SUPERRAY_GENERATOR_H
#define GRIDMAP2D_SUPERRAY_SUPERRAY_GENERATOR_H
#include <gridmap2D/gridmap2D_types.h>
#include <gridmap2D/Grid2DKey.h>
#include <gridmap2D/Pointcloud.h>
#include <gridmap2D_superray/SuperRayCloud.h>
#ifdef _OPENMP
#include <omp.h>
#pragma omp declare reduction (merge : std::vector<gridmap2D::SuperRay> : omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end()))
#endif
namespace gridmap2D{
	// Turns an input point cloud into "super rays" — aggregated rays grouped
	// per grid pixel — for faster occupancy updates of a 2-D grid map.
	class SuperRayGenerator{
	public:
		// _resolution: grid cell size; _grid_max_val: key-space offset added in
		// coordToKey(); _threshold: per-pixel point-count limit controlling when
		// super rays are generated (0 = default, see implementation).
		SuperRayGenerator(const double _resolution, const unsigned int _grid_max_val, const int _threshold = 0);
		~SuperRayGenerator() {};

		// Generate super rays for point cloud _pc observed from _origin and
		// append them to _srcloud.
		void GenerateSuperRay(const Pointcloud& _pc, const point2d& _origin, SuperRayCloud& _srcloud);

	protected:
		struct PixelInfo;	// forward declarations of the traversal structs below
		struct Axis2D;

		point2d originW;			// origin point in World Space
		Grid2DKey originKey;		// origin key

		// constants for generating super rays
		double RESOLUTION;			// resolution
		double RESOLUTION_FACTOR;	// 1.0 / resolution
		unsigned int GRID_MAX_VAL;	// offset
		unsigned int THRESHOLD;		// threshold for limiting to generate super rays for each pixel

		// Functions for generating super rays
		void GenerateSuperRay(const point2d_collection& _pointlist, std::vector<SuperRay>& _srcloud);
		void GenerateSuperRay2D(const point2d_collection& _pointlist, Axis2D& _axis, PixelInfo& _pixelinfo, std::vector<SuperRay>& _srcloud);

		// Function for generating mapping line in 2-D
		double GenerateMappingLine(PixelInfo& _pixelinfo, const unsigned int& _axisX, const unsigned int& _axisY, std::vector<double>& _mappingPlane);

		// Utility functions
		typedef unordered_ns::unordered_map<Grid2DKey, std::vector<point2d>, Grid2DKey::KeyHash> Voxelized_Pointclouds;
		void ComputeAxis(const point2d& _min, const point2d& _max, Axis2D& _axis);

		// Re-implementation of key / coordinate conversion functions
		inline Grid2DKey coordToKey(const point2d& coord) const {
			return Grid2DKey(coordToKey(coord(0)), coordToKey(coord(1)));
		}
		// Discretize one world coordinate into a key: floor to a cell index,
		// then shift by GRID_MAX_VAL so keys are non-negative.
		inline key_type coordToKey(double coordinate) const {
			return ((int)floor(RESOLUTION_FACTOR * coordinate)) + GRID_MAX_VAL;
		}

		// Structures that represent the traversal information
		struct PixelInfo{
			PixelInfo(void) {};

			// Pixel Info.
			point2d		minW;		// min position of pixel
			point2d		maxW;		// max position of pixel
			Grid2DKey	pixelKey;	// key of pixel
		};

		struct Axis2D{
			Axis2D(void) : axisU(0), axisV(1) {};

			unsigned int axisU;		// Nearest Axis
			unsigned int axisV;		// Farthest Axis
		};
	};
}
#endif
|
DRB049-fprintf-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
/*
Example use of fprintf
*/
#include <stdio.h>
#include <omp.h>
int main(int argc,char *argv[])
{
  int i;
  int ret;
  FILE *pfile;
  int len = 1000;
  int A[1000];
  /* Fill A[i] = i; iterations are independent, so the loop is race-free. */
  #pragma omp parallel for private (i)
  for (i = 0; i <= len - 1; i += 1) {
    A[i] = i;
  }
  /* Append the array to a temporary file, one value per line. */
  pfile = fopen("mytempfile.txt","a+");
  if (pfile == ((void *)0)) {
    fprintf(stderr,"Error in fopen()\n");
    /* Bug fix: the original fell through and passed the NULL stream to
       fprintf()/fclose(), which is undefined behavior. */
    return 1;
  }
  for (i = 0; i <= len - 1; i += 1) {
    fprintf(pfile,"%d\n",A[i]);
  }
  fclose(pfile);
  /* Clean up the temporary file; report (but tolerate) failure. */
  ret = remove("mytempfile.txt");
  if (ret != 0) {
    fprintf(stderr,"Error: unable to delete mytempfile.txt\n");
  }
  return 0;
}
|
ast-dump-openmp-critical.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test(void) { // AST-dump fixture; trailing comments only, so FileCheck's line:N references stay valid
#pragma omp critical
  ; // the critical region's associated statement is a deliberate null statement
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-critical.c:3:1, line:6:1> line:3:6 test 'void (void)'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:17, line:6:1>
// CHECK-NEXT: `-OMPCriticalDirective {{.*}} <line:4:1, col:21>
// CHECK-NEXT: `-NullStmt {{.*}} <line:5:3>
|
GB_unop__lnot_int8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__lnot_int8_int8)
// op(A') function: GB (_unop_tran__lnot_int8_int8)
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CAST(z, aij) \
int8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = aij ; \
Cx [pC] = !(z != 0) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the LNOT operator entrywise: Cx [p] = !(Ax [p] != 0) for all p,
// parallelized over nthreads. Handles both the dense case (Ab == NULL)
// and the bitmap case (Ab marks which entries are present).
GrB_Info GB (_unop_apply__lnot_int8_int8)
(
    int8_t *Cx,         // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a plain parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int8_t aij = Ax [p] ;
            int8_t z = aij ;
            Cx [p] = !(z != 0) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries absent from the bitmap
            int8_t aij = Ax [p] ;
            int8_t z = aij ;
            Cx [p] = !(z != 0) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = LNOT (A'): transpose A while applying the operator. The actual
// traversal is the shared template in GB_unop_transpose.c, specialized
// here via the GB_* macros defined at the top of this file.
GrB_Info GB (_unop_tran__lnot_int8_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
sxc_fmt_plug.c | /* SXC cracker patch for JtR. Hacked together during Summer of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sxc;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sxc);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "sha.h"
#include <openssl/blowfish.h>
#include "pbkdf2_hmac_sha1.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2 // tuned on core i7
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "sxc"
#define FORMAT_NAME "StarOffice .sxc"
#define FORMAT_TAG "$sxc$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME " Blowfish"
#else
#define ALGORITHM_NAME "SHA1 Blowfish 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 20
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests sxc_tests[] = {
{"$sxc$*0*0*1024*16*4448359828281a1e6842c31453473abfeae584fb*8*dc0248bea0c7508c*16*1d53770002fe9d8016064e5ef9423174*860*864*f00399ab17b9899cd517758ecf918d4da78099ccd3557aef5e22e137fd5b81f732fc7c167c4de0cf263b4f82b50e3d6abc65da613a36b0025d89e1a09adeb4106da28040d1019bb4b36630fc8bc94fe5b515504bf8a92ea630bb95ace074868e7c10743ec970c89895f44b975a30b6ca032354f3e73ec86b2cc7a4f7a185884026d971b37b1e0e650376a2552e27ba955c700f8903a82a6df11f6cc2ecf63290f02ffdd278f890d1db75f9e8bd0f437c4ec613d3c6dcb421bbd1067be633593ba9bd58f77ef08e0cca64c732f892567d20de8d4c444fa9c1c1adc5e4657ef9740cb69ce55c8f9e6b1cfed0739ef002f1e1c1e54a5df50a759d92354f78eb90a9d9378f36df7d1edd8002ea0d637604fcd2408494c2d42b1771e2a2a20b55044836f76db4ed71e8a53f55a04f9437946603e7246c2d2d70caf6be0de82e8977fab4de84ca3783baedac041195d8b51166b502ff80c17db78f63d3632df1d5ef5b14d8d5553fc40b072030f9e3374c93e929a490c6cfb170f04433fc46f43b9c7d27f3f8c4ed759d4a20c2e53a0701b7c3d9201390a9b5597ce8ba35bd765b662e2242b9821bbb63b6be502d2150fff37e4b7f2a6b592fd0e319a7349df320e7fe7da600a2a05628dc00e04d480c085417f676bd0518bc39d9a9be34fc0cb192d5fa5e0c657cdf7c1ad265a2e81b90ac8b28d326f98b8f33c123df83edc964d2c17a904d0df8bd9ecbf629929d6e48cadc97f49a8941ada3d219e8c0f04f37cecc9a50cc5307fd2a488c34829b05cd1615ae0d1ef0ce450529aa755f9ae38332187ffe4144990de3265afaacb9f0f0fb9c67f6210369f7a0cc5bb346412db08e0f4732f91aa8d4b32fe6eece4fba118f118f6df2fb6c53fa9bc164c9ab7a9d414d33281eb0c3cd02abe0a4dd1c170e41c1c960a8f12a48a7b5e1f748c08e1b150a4e389c110ea3368bc6c6ef2bee98dc92c6825cbf6aee20e690e116c0e6cf48d49b38035f6a9b0cd6053b9f5b9f8360024c9c608cbba3fe5e7966b656fa08dec3e3ce3178a0c0007b7d177c7c44e6a68f4c7325cb98264b1e0f391c75a6a8fd3691581fb68ef459458830f2138d0fd743631efd92b742dfeb62c5ea8502515eb65af414bf805992f9272a7b1b745970fd54e128751f8f6c0a4d5bc7872bc09c04037e1e91dc7192d68f780cdb0f7ef6b282ea883be462ffeffb7b396e30303030", "openwall"},
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*259cafe530bd09f8*16*8f53ea878d0795cfe05dcc65fb272c20*1024*1024*ffb0f736b69d8433f958e8f475f609948ad7c9dd052f2b92c14cb1b395ffcac043a3def76d58442e131701b3b53d56ea570633bb20c83068542420160f5db3cee5eece05b67b54d0d4cdf3fbfd928d94852924e391aa3f70cad598b48b946399b0cd1e9e7e7d081a888933f8a1973e83166799396e8290771463c623620b51fb5310b9b0e0de3e5597b66a091301ada3ba6a5d7515d1fcf0eff65e543b50f8fe2222619600054eaf69c7aa680c96bc403f115cab32d6d8e8bc0db3904a71ce3eb1283ca73fd6f75845d9b7d0275e4727a0f56bfbf962a9079f51849de2c9dee7f1dadbbae944f442169281004773309b0f8d68f2cf83076fd8b19afbccf5ab7dc58fb9554fee82e2c491d6434a4cef6f3209775343c840bc4bdfab6705000e67412ac74d00f5b6aba1fd21bca5213234a5a1100a9d93daa141a4936723ea77d008a35c9078167a3529706432b36b9ec012c060d093535c85ca6feb75165d620d7d663c3e76b9bf3af614556ed8560b446a8a73649cb935383a30b4fd8fd75522203e4575cf4bc2b7f01a9294310fe021c71acbf68f6f1e95f48c30c14151c51d4fb878a16272ee73753bc750cbd48007c842412ca1dcb6214215b082c00d619a5318e2ebe9149410f501170093784afc2bd71dd9f5a87b349b96661747b1627e8cba8a5c98559fb146fa7e30db4c6f648ce3c2209f84551a7a1cd46d9172ae1354b6d093f89f6f5f58d29c1d7af8830df62e67753caa8166322caa0f8adf4b61d2013d35baa7c002e1d4c83b1cba8aaa57cf4946627fa63ba7a6a5a5c803e8d5a4794845ab670ef950b918a360cd9f12e8f3424ecab1f505cb494ad35f28d12ff183471d0f47bd67e6abd3b8c8e206d11149474a19b5c13d165d8f6dc39cf579fe1000295328aeeb82e0ae8020d2f61e4c3d6e68c25a655ab72aad5e9e74af4cf27c74158fdb1a29a3d76cd658976fa0a30743247408df00a23b593f68861348a6c46af05d21a4b81fedbf5715462ec8ffc5f001a85c43058ac1fab488236588ef0bf08dd8dd7c7fce630a0a996395b503647d9a2f0dd63dd2f939eca8e1849ee4ed41a6d5672d947177e8f890692de879a20dd9e366ec494d270faf0d24fc076172a25998aac218586404687e7c77b55e77e0eff9b1c65c3f8da99deaa86411ab6aca2531d84b364349591bc73e7504163afd23c5208e321883ee611ea7e4e5885086e4fa7196e16b948cb54808b64b94106c74900e3190fd5f6068b490fd0c9c64481771527a0e2d00899fd5b7a9e7f508cc6770018fadf09d965d7a12ad3624d2161d9546d4a793
7b5f961d7f7c4714786380c147e1ec6b0583503bd5a139b892831d1ea925993bb86f12e75d9010ceba230a1c286fa3d1d654a1672313cbf0763c05c622cee452f76957c42ba0e853ecda163d15e8600a702ccdc9e8f88a", "Ghe+t0Blaster"},
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*9bb755c8a4fe8c34*16*112b9d41098c8677615755361da473a6*1024*1024*b95f0f2e0e1c7b4ee61168b646804d4b70b615f3c978cec65c9a7ab515417c79625d104373fd5012c3da6b356f8408a3a75edcc8b2aad0aa38bb33edd8933bdadbffde35a350ade73ccb9df29c2996082f5e94e324496835f8dfebe15ca38950e0f435d711ef964aa09915d58287967b5e321ca195a7f90253157afe82329da9a496c97292419b9a94cdb92f919e6d54700466aff61c200c5a355905b5a37c12d77b0e4ffd23f0204cfa664f4c0545f233db8d35af5fe337b459135da398fd23101becb194db305496474ba4179a7355285a9ec935044e1831f290f5f87ed3e00925e7fb4fc6bc38d9f0cfe9abf72560400490d2fd398d2d49516b618f99168602f323dd1786bcca394830341dfbeb377f9b7ef161dc1470f5e92b6152fa7a4f428e8ae40100791491a9e1c9385298522320488f00535866ac6e08354a75b8b2fd293066da7eb6b4ad7f3e13c8dc98cd815b2393f147fdac6279f76fdac9abd0a94131fa84fe4e99634a362a56d60ce588f6e0b66d6f8b6d411511272ffe32181d20e7d2c3d4b680764607afb2c29dcb94a845b920e96f6c27575534f8b7f9ddd93bdcef0d717d0a899fa937e7d2eeeb6d5b0338757f6e69dac72524d4b6f74edce1f937008eb3653bcc31a88712af940cf47ec3f3efd83e4da89d1a6cb7da6cf8d7d41430bc81a4b5d7bb46cad687f2f505e3379143ae274eed6201c3b17c1e05e516a14cbf2351ccf9fdd46e1309afb170bd01eb8f6a1d8e12441525199455fb550e3fc689b1801332b2d985e336b158f846fcbca18fbe6ea21438cf1fb5fdbce8d6350e65d6468342880845675ec721af2fb9df917a3968b4a1a477fc4c74ee38a71a230d77c2a7cf66ae6b83804488cbd25213ebc470cd845a2691b16161a640ebb385aa2381dc91f692f6c4ca2709b5a7e94dfb4548000a29b56f1da08701945d6209fabbd1621b28849fc27810775f1a0e0204d3ae9040a8cfb1386499a39d87149cfc1579de7d059662ad25a67abd42b30bb3608f09142ca030351c3a1e921e4c7bbc11aab846ef42eb5d1418c15ada77539aca096e0678439cd1b60950d2aa0cc4d2004b1ac48dc6a454c5a8e9ea7e910047c7c83895fd614fd9dfd961631eb23757646143c2aeb03c1a6476e78fc4ccf0f02cc1f88ec1b0080a170ac6871dc183939f7a4376965b0dfa7922012582eec4846ee621edc5547a2b9c4893e7f67f76541a4bd4a91827a57b3db5cdea29a2a3cc20238d89c8145c14b037360ad27f54f87317ef70472d6b1fd9f1168bcf8aba6071257b3adebab8d4e115188ed4af3fc3574fdc
cb4bc7eeb00a6a442f1b96a989b735f5e6059ec72c1677b77f437dcb93066f8591a11071799c3a0ec3b48f6160976aff1928c375358837e1ef02e20397b2e9d8d9c4bff23172c9b4c0b941cb1b49b5bc070f72a14cd384", "M1racl33"},
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*ceb1edb1e3cb72fd*16*f7104c9b2789540f5fd4beef009c0139*1024*1024*709130b940a9663d0a5687133c6f78535d05a72936faed8c2f3c1b4e29423baaabcee4f0d7d57e3ad8d8c090486f974c4d0ce4be5b29ef8e1b02c01b4af1959ed0b277146a45aec35a48997b584b82697803193644eefd88a7eefcae8819839e13702f887278a597dd954babd82bf71bf8ca8559af0e5537be0264e358d36b4f5067960edf608de731e04d117de953386aadee71849edbc494fac3e6b14567a9e9c545a06d402acd3158441829f25478ed0f9086dabd2d3913b123b43c27176f8f08f30312d84e82d47654097a2bce95554357db3ce3d45a7441472067f55e4ea6244a3dedc23db4bea8f549109ffac382cf5b652c5b1ee431bcab1051567c263a9d668c5d6a15a6f8da754914746c1d3c7eb6347bdd8d6a3ac82e4c742fcf8721913c111dfd5398f2698db00f7220d2a3562e02f7f7a6505af3ba1ee10b46f2ab5b5d2f52d288fd12814c6edbcb8d50b6e8716fba0d5962747b971689fe75e94fa36ec39598ea30e15ab2b9c9f22ca04b890a13b18fb3c7a962050426bb2da08c8b993608b9c1ffd0a21e0c74e993242ead8eb30f86d7d2dcdbd4774d85c2e06adbe4b40050ff0ac1a8afe8fbc2175ec4da4676a691b1fce38421175734c20f07a604fea5287e1c33b420aa9db4de9bd97382c161b4ec0818add675e52ebf036aad779f24b824be4b2b013c470ff66cbf44f5800e128a3b328e80a5fd6295b9b3a94e915f9add6710cb9444432751a7a31c3a3422f48a5eabc26d9a52571b8447bdd0a5977ff7153d95337cef7ff2ec29774332fbeed6ee5eed5e12288cc13e14ba9d5ff3dd052e28ba96715f5b95d7ea214ebcd9e60b26308eb11370b824b5cff2644dd2117985b3c25ba8076d4025cf3a3a62da62d5e11d44422a142048e8cd00c7de6a0a55fd5dc09a3ed01dfe35b88268f351b6ff289fee8e52ac29fe32d9990e0d6d87f39727b6a762bac9d509c6ea235fc8bedc3bec2143eae9fd2cb831b798ef8261d72785002638b940947de0aad64f791f9a27e5b091e55adf4aee0649f6785bdd37e0248fedd1759d771aeacacb3ff6e7cf2d045f791428ab61710b54e869213393caf1b6bc99066678351deafc290cecc1f6b40b5532adbbab9a70408c61a437d4483b6a75cb61a55b20881efc0d849e0f60c1887f0fa091672179a145c4ab1b6487a0e939e0123d5aaffa3aec66ab593f9c25d27f22f4a73a999a4ab45e8bc7d71a85e2d40afadad1a1dc0b8389f96f91614293fa205583ef1c3440e3df50e8aa5f1a13e5929b72cd003461ff03d44d8c84bdada176b24459021d398b2b91b61a9c0b
553a8714c703d32452c691a33f1581e98c2439514ca3e7deeef90850f8d6d89bf1d3a5762a56ef769ea588f5c1705bfb7b944cfbbb0632718ee3722f4e1929b35706d6413a315a11bc16349af109a7e675df2ab1eebe93", "excel123"},
{NULL}
};
#if defined (_OPENMP)
static int omp_t = 1;	/* thread-scaling factor applied in init() */
#endif
/* Per-candidate plaintexts and the resulting SHA-1 digests, allocated in
 * init() with max_keys_per_crypt entries each. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)];

/* Parsed form of one "$sxc$*..." hash; filled by get_salt(), selected by
 * set_salt(). Buffer sizes match the bounds enforced in valid(). */
static struct custom_salt {
	int cipher_type; // FIXME: cipher_type seems to be ignored
	int checksum_type;
	int iterations;        /* PBKDF2-HMAC-SHA1 iteration count */
	int key_size;          /* derived key length: 16 or 32 bytes */
	int iv_length;
	int salt_length;
	int original_length;   /* plaintext length checksummed after decryption */
	int length;            /* length of the encrypted content blob */
	unsigned char iv[16];
	unsigned char salt[32];
	unsigned char content[1024];
} *cur_salt;
/* One-time format setup: scale the keys-per-crypt limits for OpenMP and
 * allocate the saved_key / crypt_out arrays sized to the (possibly
 * scaled) maximum batch. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	/* min scales by the thread count; max additionally by OMP_SCALE so
	   each thread gets several work items per crypt_all() call. */
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
}
/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/* Sanity-check one candidate ciphertext without storing anything.
 * Walks the '*'-separated fields after the "$sxc$*" tag in the same order
 * get_salt() consumes them, rejecting any hash whose lengths or values
 * would overflow the fixed buffers in struct custom_salt.
 * Returns 1 if the hash is well-formed, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p;
	int res, extra;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	/* strtokm mutates its input, so tokenize a private copy. */
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "*")) == NULL)	/* cipher type */
		goto err;
	res = atoi(p);
	if (res != 0 && res != 1)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* checksum type */
		goto err;
	res = atoi(p);
	if (res != 0 && res != 1)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iterations */
		goto err;
	res = atoi(p);
	if (res <= 0)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* key size */
		goto err;
	res = atoi(p);
	if (res != 16 && res != 32)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* checksum field (skipped) */
		goto err;
	if (hexlenl(p, &extra) != BINARY_SIZE * 2 || extra)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iv length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 16)	/* must fit custom_salt.iv[16] */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iv */
		goto err;
	if (hexlenl(p, &extra) != res * 2 || extra)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 32)	/* must fit custom_salt.salt[32] */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
		goto err;
	if (hexlenl(p, &extra) != res * 2 || extra)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* original length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 1024) /* 1024 because of "unsigned char output[1024];" in crypt_all */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 1024)	/* must fit custom_salt.content[1024] */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* content */
		goto err;
	if (hexlenl(p, &extra) != res * 2 || extra)
		goto err;
	if (strtokm(NULL, "*") != NULL)	/* the end */
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/* Parse a validated ciphertext into a struct custom_salt.
 * Field order mirrors valid(); hex fields are decoded via the atoi16
 * lookup table. Returns a pointer to a static buffer that is overwritten
 * on the next call (the caller is expected to copy SALT_SIZE bytes). */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static struct custom_salt cs;
	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN;	/* skip over "$sxc$*" */
	p = strtokm(ctcopy, "*");
	cs.cipher_type = atoi(p);
	p = strtokm(NULL, "*");
	cs.checksum_type = atoi(p);
	p = strtokm(NULL, "*");
	cs.iterations = atoi(p);
	p = strtokm(NULL, "*");
	cs.key_size = atoi(p);
	strtokm(NULL, "*");
	/* skip checksum field (consumed separately by get_binary()) */
	p = strtokm(NULL, "*");
	cs.iv_length = atoi(p);
	p = strtokm(NULL, "*");
	for (i = 0; i < cs.iv_length; i++)	/* decode hex iv */
		cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.salt_length = atoi(p);
	p = strtokm(NULL, "*");
	for (i = 0; i < cs.salt_length; i++)	/* decode hex salt */
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.original_length = atoi(p);
	p = strtokm(NULL, "*");
	cs.length = atoi(p);
	p = strtokm(NULL, "*");
	for (i = 0; i < cs.length; i++)	/* decode hex encrypted content */
		cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/* Extract the stored SHA-1 checksum (the 5th '*'-field) as raw bytes.
 * Returns a pointer to a static, word-aligned buffer overwritten on the
 * next call. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;	/* forces alignment of c[] */
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	ctcopy += 6;	/* skip over "$sxc$*" */
	/* skip cipher type, checksum type, iterations and key size */
	strtokm(ctcopy, "*");
	strtokm(NULL, "*");
	strtokm(NULL, "*");
	strtokm(NULL, "*");
	p = strtokm(NULL, "*");
	for (i = 0; i < BINARY_SIZE; i++) {	/* decode hex checksum */
		out[i] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	MEM_FREE(keeptr);
	return out;
}
/* Select the salt that subsequent crypt_all() calls will use. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* Compute candidate checksums for keys [0, count):
 *   1. SHA-1 the plaintext password.
 *   2. PBKDF2-HMAC-SHA1 (scalar or SIMD) over that digest with the salt
 *      and iteration count to derive the cipher key.
 *   3. Blowfish-CFB64 "decrypt" the stored content with the derived key
 *      and the salt's IV.
 *   4. SHA-1 the first original_length bytes of the result into
 *      crypt_out[] for comparison against the stored checksum. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		unsigned char key[MAX_KEYS_PER_CRYPT][32];
		unsigned char hash[MAX_KEYS_PER_CRYPT][32];
		BF_KEY bf_key;
		int bf_ivec_pos;
		unsigned char ivec[8];
		unsigned char output[1024];
		int i;
		SHA_CTX ctx;
		/* step 1: SHA-1 of each candidate password */
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			SHA1_Init(&ctx);
			SHA1_Update(&ctx, (unsigned char *)saved_key[index+i], strlen(saved_key[index+i]));
			SHA1_Final((unsigned char *)hash[i], &ctx);
		}
		/* step 2: derive the Blowfish key via PBKDF2-HMAC-SHA1 */
#ifdef SIMD_COEF_32
		{
			int lens[MAX_KEYS_PER_CRYPT];
			unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				lens[i] = 20;	/* SHA-1 digest length */
				pin[i] = (unsigned char*)hash[i];
				pout[i] = key[i];
			}
			pbkdf2_sha1_sse((const unsigned char**)pin, lens, cur_salt->salt,
				cur_salt->salt_length,
				cur_salt->iterations, pout,
				cur_salt->key_size, 0);
		}
#else
		pbkdf2_sha1(hash[0], 20, cur_salt->salt,
			cur_salt->salt_length,
			cur_salt->iterations, key[0],
			cur_salt->key_size, 0);
#endif
		/* steps 3-4: decrypt content, checksum the plaintext */
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			bf_ivec_pos = 0;
			memcpy(ivec, cur_salt->iv, 8);
			BF_set_key(&bf_key, cur_salt->key_size, key[i]);
			BF_cfb64_encrypt(cur_salt->content, output, cur_salt->length, &bf_key, ivec, &bf_ivec_pos, 0);
			SHA1_Init(&ctx);
			SHA1_Update(&ctx, output, cur_salt->original_length);
			SHA1_Final((unsigned char*)crypt_out[index+i], &ctx);
		}
	}
	return count;
}
/* Quick scan: does any computed checksum in the batch match the stored
 * binary? NOTE(review): only ARCH_SIZE bytes are compared here —
 * presumably a deliberate fast partial check, since cmp_one() below
 * confirms the full BINARY_SIZE; verify against other JtR formats. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

/* Full comparison of one candidate's checksum against the stored binary. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* No further verification is needed beyond cmp_one(); always a match. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store one candidate password, truncated to PLAINTEXT_LENGTH and always
 * NUL-terminated. */
static void sxc_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

/* Return the (possibly truncated) stored candidate password. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Tunable-cost reporter: expose the salt's PBKDF2 iteration count. */
static unsigned int iteration_count(void *salt)
{
	return (unsigned int)((struct custom_salt *)salt)->iterations;
}
/* Format descriptor registered with the JtR core: static parameters
 * first, then the method table wired to the functions above. */
struct fmt_main fmt_sxc = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",
		},
		{ FORMAT_TAG },
		sxc_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		sxc_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
pi_critical.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/*
 * Estimate pi by the midpoint rule on the integral of 4/(1+x^2) over
 * [0,1], partitioning the index range manually across OpenMP threads and
 * merging the per-thread partial sums inside a critical section.
 */
int main(int argc, const char *argv[])
{
    long steps = 1000000000;
    if (argc > 1)
    {
        /* Bug fix: use atol, not atoi — steps is a long and atoi would
           truncate arguments above INT_MAX. */
        steps = atol(argv[1]);
    }
    else
    {
        printf("Use: %s [puntos]\n", argv[0]);
    }

    double step = 1.0 / (double)steps;
    double sum = 0.0;

    double start = omp_get_wtime();
#pragma omp parallel
    {
        int nhilos = omp_get_num_threads();
        int id = omp_get_thread_num();
        /* Static partition of [0, steps); the last thread absorbs the
           remainder. Use long so a large step count does not overflow. */
        long num = steps / nhilos;
        long inicio = id * num;
        long fin = inicio + num;
        if (id == (nhilos - 1))
        {
            fin = steps;
        }
        /* Bug fix: the accumulator must be floating point — the original
           `int suma_local` truncated every 4/(1+x*x) term (each between 2
           and 4), producing a wildly wrong value of pi. */
        double suma_local = 0.0;
        double x;
        for (long i = inicio; i < fin; i++)
        {
            x = (i + 0.5) * step;
            suma_local += 4.0 / (1.0 + x * x);
        }
        /* One thread at a time folds its partial sum into the total. */
#pragma omp critical
        {
            sum += suma_local;
        }
    }

    double pi = step * sum;
    double delta = omp_get_wtime() - start;
    printf("PI = %.16g calculado en %.4g segundos con %ld puntos\n", pi, delta, steps);
    return 0;
}
dctz-comp-lib.c | /**
* @file dctz-comp-lib.c
* @author Seung Woo Son
* @date July 2019
* @brief DCTZ compression library routine
* (C) 2019 University of Massachuetts Lowell.
See LICENSE in top-level directory.
*/
#include <stdlib.h>
#include <memory.h>
#include <string.h>
#ifdef TIME_DEBUG
#include <sys/time.h>
#endif /* TIME_DEBUG */
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <pthread.h>
#include "zlib.h"
#include "dctz.h"
#include "dct.h"
#define DEF_MEM_LEVEL 8
/* File-scope scratch views over the working buffers, one member per
 * supported element type (float or double); which member is active is
 * decided by var->datatype at run time.
 * NOTE(review): shared mutable file-scope state — presumably single-use
 * per process; confirm there is no concurrent caller. */
union
{
  float *f;
  double *d;
} a, a_x;
/* pthread entry point: run zlib deflate to completion on the z_stream
 * passed via arg, tear the stream down, and return the compressed byte
 * count (total_out) as the thread's exit value. */
void *compress_thread (void *arg)
{
  z_stream *defstream = (z_stream *)arg;
#ifdef DEBUG
  printf("compress started ...\n");
#endif
  deflate(defstream, Z_FINISH); /* Z_FINISH: consume all input, flush all output */
#ifdef DEBUG
  printf("done! compression...\n");
#endif
  uLong ret = defstream->total_out;
  deflateEnd(defstream);
  /* NOTE(review): smuggles a uLong through void* — assumes the integer
     fits in a pointer on the target platform; confirm for 32-bit builds. */
  pthread_exit((void *)ret);
}
/*
 * dctz_compress: lossy-compress N values from var->buf (double or float,
 * selected by var->datatype) into var_z->buf, with point-wise error bound
 * error_bound.  The compressed byte count is stored in *outSize.
 *
 * Pipeline: optional scaling, block DCT (FFTW), quantization of AC
 * coefficients into NBINS bins of width 2*error_bound*BRSF, exact storage of
 * out-of-range coefficients, then three zlib deflate streams (bin ids, DC
 * terms, exact AC terms) compressed concurrently on three pthreads.
 *
 * Output buffer layout: struct header | deflated bin_index | deflated DC |
 * deflated AC_exact [| raw qtable when USE_QTABLE].
 *
 * Returns 1 on success; exits the process on allocation failure or an
 * unacceptable error bound.
 */
int dctz_compress(t_var *var, int N, size_t *outSize, t_var *var_z, double error_bound)
{
  int i, j, nblk, rem;
#ifdef TIME_DEBUG
  struct timeval start_t, end_t, gstart_t;
  double sf_t, dct_t, DC_AC_t, zlib_t, comp_t, malloc_t, genbin_t;
#endif
  double *bin_maxes, *bin_center, bin_width, range_min, range_max;
  t_bin_id *bin_index, *bin_indexz, *bin_indexz2;
#ifdef USE_TRUNCATE
  float *DC, *DCz, *DCz2, *AC_exact, *AC_exactz, *AC_exactz2;
#else
  double *DC, *DCz, *DCz2, *AC_exact, *AC_exactz, *AC_exactz2;
#endif
  struct header h;
  t_bstat bs;
  size_t type_size = 0;
#ifdef USE_QTABLE
  double *qtable; /* Quantizer Table for out-of-range coefficients */
#endif

  if (var->datatype == DOUBLE)
    type_size = sizeof(double);
  else /* FLOAT */
    type_size = sizeof(float);

  /* a_x (file-scope union) receives the DCT coefficients of the input */
  if (var->datatype == DOUBLE) {
    if (NULL == (a_x.d = (double *)malloc(N*type_size))) {
      fprintf(stderr, "Out of memory: a_x\n");
      exit(1);
    }
  }
  else { /* FLOAT */
    if (NULL == (a_x.f = (float *)malloc(N*type_size))) {
      fprintf(stderr, "Out of memory: a_x\n");
      exit(1);
    }
  }

  if (error_bound < 1E-6) {
    printf("ERROR BOUND is not acceptable");
    exit(1);
  }

  /* TODO: bin_maxes and bin_center should be double or float? */
  if (NULL == (bin_maxes = (double *)malloc(NBINS*sizeof(double)))) {
    fprintf(stderr, "Out of memory: bin_maxes\n");
    exit(1);
  }
  if (NULL == (bin_center = (double *)malloc(NBINS*sizeof(double)))) {
    fprintf(stderr, "Out of memory: bin_center\n");
    exit(1);
  }

#ifdef DEBUG
  /* NOTE(review): dumps buf.d even when datatype is FLOAT — debug only */
  for (i=0; i<BLK_SZ; i++) { /* show the first block */
    printf("a[%d] = %e\n", i, var->buf.d[i]);
    if (i%BLK_SZ == 0 && i != 0) printf("\n");
  }
#endif

#ifdef USE_QTABLE
  /* Start of Initialize Quantizer Table */
  if (NULL == (qtable = (double *)malloc(BLK_SZ*sizeof(double)))) {
    fprintf(stderr, "Out of memory: qtable\n");
    exit(1);
  }
  for (i=0; i<BLK_SZ; i++) {
    qtable[i] = 0.0;
  }
  /* with USE_QTABLE a second pass appends up to N more bin ids (bin_index[k++]
   * below), hence the 2*N capacity */
  if (NULL == (bin_index = (t_bin_id *)malloc(2*N*sizeof(t_bin_id)))) {
    fprintf(stderr, "Out of memory: bin_index[]\n");
    exit(1);
  }
  memset(bin_index, 0, sizeof(t_bin_id)*2*N);
#ifdef DEBUG
  for (i=0; i<BLK_SZ; i++) {
    printf("qtable[%d] = %e\n", i, qtable[i]);
  }
#endif
  /* End of Initialize Quantizer Table */
#else
  if (NULL == (bin_index = (t_bin_id *)malloc(N*sizeof(t_bin_id)))) {
    fprintf(stderr, "Out of memory: bin_index[]\n");
    exit(1);
  }
  memset(bin_index, 0, sizeof(t_bin_id)*N);
#endif /* USE_QTABLE */

#ifdef TIME_DEBUG
  gettimeofday(&start_t, NULL);
  gstart_t = start_t;
#endif

  /* determine scaling factor */
  calc_data_stat(var, &bs, N);

  if (var->datatype == DOUBLE) {
#ifdef DEBUG
    printf("scaling factor = %f\n", bs.sf.d);
#endif
    double xscale = pow(10, bs.sf.d);
    /* apply scaling factor */
    if (bs.sf.d != 1.0) {
#ifdef _OPENMP
      /* FIX: shared() accepts only variable names; the original
       * shared(bs.sf.d) (a struct member) is invalid OpenMP */
#pragma omp parallel for private(i) shared(xscale)
#endif
      for (i=0; i<N; i++)
        var->buf.d[i] /= xscale;
    }
  }
  else { /* FLOAT */
#ifdef DEBUG
    printf("scaling factor = %f\n", bs.sf.f);
#endif
    float xscale = pow(10, bs.sf.f);
    /* apply scaling factor */
    if (bs.sf.f != 1.0) {
#ifdef _OPENMP
      /* FIX: shared() accepts only variable names (was shared(bs.sf.f)) */
#pragma omp parallel for private(i) shared(xscale)
#endif
      for (i=0; i<N; i++)
        var->buf.f[i] /= xscale;
    }
  }

#ifdef TIME_DEBUG
  gettimeofday(&end_t, NULL);
  sf_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
  gettimeofday(&start_t, NULL);
#endif

  /* DCT over decomposed blocks */
  nblk = CEIL(N, BLK_SZ);
  rem = N % BLK_SZ;
#ifdef DEBUG
  printf("\nnumber of blocks = %d, remainder = %d\n", nblk, rem);
#endif

#ifdef USE_TRUNCATE
  if (NULL == (DC = (float *)malloc(nblk*sizeof(float)))) {
    fprintf(stderr, "Out of memory: DC[]\n");
    exit(1);
  }
#else
  if (NULL == (DC = (double *)malloc(nblk*sizeof(double)))) {
    fprintf(stderr, "Out of memory: DC[]\n");
    exit(1);
  }
#endif

  /* FIX: the deflate output buffers below are written with
   * avail_out = compressBound(input size), which can EXCEED the input size
   * for incompressible data; size them with compressBound() instead of the
   * raw size (the original raw-size buffers could be overrun). */
#ifdef USE_TRUNCATE
  if (NULL == (DCz = (float *)malloc(compressBound(nblk*sizeof(float))))) {
    fprintf(stderr, "Out of memory: DCz[]\n");
    exit(1);
  }
  memset(DCz, 0, sizeof(float)*nblk); /* TODO: is it necessary? */
#else
  if (NULL == (DCz = (double *)malloc(compressBound(nblk*sizeof(double))))) {
    fprintf(stderr, "Out of memory: DCz[]\n");
    exit(1);
  }
#endif

#ifdef USE_QTABLE
  /* up to 2*N bin ids may be deflated when the quantizer table is in use */
  if (NULL == (bin_indexz = (t_bin_id *)malloc(compressBound(2*N*sizeof(t_bin_id))))) {
    fprintf(stderr, "Out of memory: bin_indexz[]\n");
    exit(1);
  }
#else
  if (NULL == (bin_indexz = (t_bin_id *)malloc(compressBound(N*sizeof(t_bin_id))))) {
    fprintf(stderr, "Out of memory: bin_indexz[]\n");
    exit(1);
  }
#endif
  memset (bin_indexz, 0, sizeof(t_bin_id)*N);

#ifdef TIME_DEBUG
  gettimeofday(&end_t, NULL);
  malloc_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
  gettimeofday(&start_t, NULL);
#endif

  if (var->datatype == DOUBLE)
    gen_bins(bs.min.d, bs.max.d, bin_maxes, bin_center, NBINS, error_bound);
  else /* FLOAT */
    gen_bins(bs.min.f, bs.max.f, bin_maxes, bin_center, NBINS, error_bound);

  /* symmetric binning range centered at 0; BRSF widens bins relative to the
   * raw error bound */
  int half = NBINS/2;
  bin_width = error_bound*2*BRSF;
  range_min = -(half*2+1)*(error_bound*BRSF);
  range_max = (half*2+1)*(error_bound*BRSF);

#ifdef TIME_DEBUG
  gettimeofday(&end_t, NULL);
  genbin_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
  gettimeofday(&start_t, NULL);
#endif

#ifdef USE_TRUNCATE
  if (NULL == (AC_exact = (float *)malloc(N*sizeof(float)))) {
    fprintf(stderr, "Out of memory: AC_exact\n");
    exit(1);
  }
  memset(AC_exact, 0, sizeof(float)*N); /* TODO: is it necessary? */
#else
  if (NULL == (AC_exact = (double *)malloc(N*sizeof(double)))) {
    fprintf(stderr, "Out of memory: AC_exact\n");
    exit(1);
  }
  memset(AC_exact, 0, sizeof(double)*N); /* TODO: is it necessary? */
#endif

  /* FIX: compressBound-sized output buffer (see note above) */
#ifdef USE_TRUNCATE
  if (NULL == (AC_exactz = (float *)malloc(compressBound(N*sizeof(float))))) {
    fprintf(stderr, "Out of memory: AC_exactz[]\n");
    exit(1);
  }
  memset(AC_exactz, 0, sizeof(float)*N); /* TODO: is it necessary? */
#else
  if (NULL == (AC_exactz = (double *)malloc(compressBound(N*sizeof(double))))) {
    fprintf(stderr, "Out of memory: AC_exactz[]\n");
    exit(1);
  }
  memset(AC_exactz, 0, sizeof(double)*N); /* TODO: is it necessary? */
#endif

  if (var->datatype == DOUBLE)
    dct_init(BLK_SZ);
  else /* FLOAT */
    dct_init_f(BLK_SZ);

  int tot_AC_exact_count = 0;

  /* DCT block decomposed: first pass computes coefficients, saves DC terms,
   * assigns each AC term a bin id (NBINS == out of range) and, with
   * USE_QTABLE, records per-position coefficient maxima */
  for (i=0; i<nblk; i++) { /* for each decomposed blk */
    int l_blk_sz = ((i==nblk-1)&&(rem!=0))?rem:BLK_SZ;
    if ((i==nblk-1) && (rem!=0)) {
      /* last, shorter block needs a re-planned DCT */
      if (var->datatype == DOUBLE) {
        dct_finish();
        dct_init(rem);
      }
      else { /* FLOAT */
        dct_finish_f();
        dct_init_f(rem);
      }
    }
    if (var->datatype == DOUBLE)
      dct_fftw(var->buf.d+i*BLK_SZ, a_x.d+i*BLK_SZ, l_blk_sz, nblk);
    else /* FLOAT */
      dct_fftw_f(var->buf.f+i*BLK_SZ, a_x.f+i*BLK_SZ, l_blk_sz, nblk);
#ifdef DEBUG
    printf("block %d: after DCT:\n", i);
    for (j=0; j<BLK_SZ && (i<3); j++){ /* show the first block only */
      printf("a_x[%d] = %e \n", i*BLK_SZ+j, a_x.d[i*BLK_SZ+j]);
    }
    printf("\n");
#endif
#ifdef USE_TRUNCATE
    DC[i] = (float)(var->datatype == DOUBLE ? a_x.d[i*BLK_SZ] : a_x.f[i*BLK_SZ]); /* save DC component in truncated*/
#else
    /* FIX: original read a_x.d unconditionally, wrong for FLOAT input */
    DC[i] = (var->datatype == DOUBLE) ? a_x.d[i*BLK_SZ] : (double)a_x.f[i*BLK_SZ]; /* save DC component */
#endif
    bin_index[i*BLK_SZ] = NBINS; /* DC slot: store as it is */
    for (j=1; j<l_blk_sz; j++) {
      if (var->datatype == DOUBLE) {
        double item = a_x.d[i*BLK_SZ+j];
        t_bin_id bin_id;
        if (item < range_min || item > range_max) {
          bin_id = NBINS; /* out of range: stored exactly (or QT-normalized) */
#ifdef USE_QTABLE
          /* The Start of Making Quantizer Table -QT applied to block coefficients */
          if (fabs(item) >= qtable[j])
            qtable[j] = fabs(item);
#endif /* USE_QTABLE */
        }
        else
          bin_id = (t_bin_id)((item-range_min)/bin_width);
#ifdef DEBUG
        printf("bin_id = %d\n", bin_id);
#endif
        bin_index[i*BLK_SZ+j] = bin_id;
#ifdef DEBUG
        printf("a_x[%d]=%e => %d\n", i*BLK_SZ+j, (double)item, bin_id);
#endif
      } /* DOUBLE */
      else { /* FLOAT */
        float item = a_x.f[i*BLK_SZ+j];
        t_bin_id bin_id;
        if (item < range_min || item > range_max) {
          bin_id = NBINS;
#ifdef USE_QTABLE
          /* The Start of Making Quantizer Table -QT applied to block coefficients */
          if (fabs(item) >= qtable[j])
            qtable[j] = fabs(item);
#endif /* USE_QTABLE */
        }
        else
          bin_id = (t_bin_id)((item-range_min)/bin_width);
#ifdef DEBUG
        printf("bin_id = %d\n", bin_id);
#endif
        bin_index[i*BLK_SZ+j] = bin_id;
#ifdef DEBUG
        printf("a_x[%d]=%e => %d\n", i*BLK_SZ+j, (float)item, bin_id);
#endif
      } /* else: FLOAT */
    }
    /* The End of of Making Quantizer Table */
  }

  if (var->datatype == DOUBLE)
    dct_finish();
  else /* FLOAT */
    dct_finish_f();

#ifdef DCT_FILE_DEBUG
  FILE *fp = fopen("dct_result.bin", "w+");
  if (var->datatype == DOUBLE)
    fwrite(a_x.d, sizeof(double), N, fp);
  else /* FLOAT */
    fwrite(a_x.f, sizeof(float), N, fp);
  fclose(fp);
#endif

#ifdef USE_QTABLE
#ifdef DEBUG
  printf("Quantizer Table:\n");
  for (j=0; j<BLK_SZ ; j++) { /* Show Quantizer Table */
    printf("before qtable[%d] = %e \n", j, qtable[j]);
  }
#endif
  /* clamp tiny entries so the division below cannot blow up */
  for (j=1; j<BLK_SZ; j++) {
    //if (qtable[j] < bin_maxes[NBINS-1]) {
    if (qtable[j] < 1.0) {
      qtable[j] = 1.0;
    }
  }
#ifdef DEBUG
  printf("Quantizer Table:\n");
  for (j=0; j<BLK_SZ ; j++) { /* Show Quantizer Table */
    printf("after qtable[%d] = %e \n", j, qtable[j]);
  }
#endif
#endif

  /* second pass: re-bin out-of-range coefficients (QT-normalized under
   * USE_QTABLE, appended at bin_index[k..]) or store them exactly */
  unsigned int k = N;
  double qt_factor = (NBINS == 255 ? 10.0 : 2000.0);
  for (i=0; i<nblk; i++) {
    int l_blk_sz = ((i==nblk-1)&&(rem != 0))?rem:BLK_SZ;
    for (j=1; j<l_blk_sz; j++) {
      t_bin_id bin_id;
      bin_id = bin_index[i*BLK_SZ+j];
      if (bin_id == NBINS) {
#ifdef USE_QTABLE
        double item = (var->datatype == DOUBLE) ? a_x.d[i*BLK_SZ+j] : a_x.f[i*BLK_SZ+j]; /* in case of FLOAT, it will be cast to double */
        /* if out of bin area, normalize it to the area from range_max/range_min to range_max/range_min +/- error_bound */
        if (item < range_min) {
          item = (item/qtable[j])*error_bound*qt_factor + range_min;
        } else if (item > range_max) {
          item = (item/qtable[j])*error_bound*qt_factor + range_max;
        }
        if (var->datatype == DOUBLE)
          a_x.d[i*BLK_SZ+j] = item; /* update a_x with updated value */
        else /* FLOAT */
          a_x.f[i*BLK_SZ+j] = item; /* update a_x with updated value */
        if (item < range_min || item > range_max) {
          bin_id = NBINS;
#ifdef USE_TRUNCATE
          AC_exact[tot_AC_exact_count++] = (float)(var->datatype == DOUBLE ? a_x.d[i*BLK_SZ+j] : a_x.f[i*BLK_SZ+j]);
#else
          /* FIX: original had "a_x.d[...] | a_x.f[...]" — a bitwise OR where
           * the ternary ':' belongs (does not compile / is meaningless) */
          AC_exact[tot_AC_exact_count++] = (var->datatype == DOUBLE ? a_x.d[i*BLK_SZ+j] : (double)a_x.f[i*BLK_SZ+j]);
#endif /* USE_TRUNCATE */
        }
        else
          bin_id = (t_bin_id)((item-range_min)/bin_width);
        bin_index[k++] = bin_id;
#ifdef DEBUG
        printf("a_x[%d]=%e => %d\n", i*BLK_SZ+j, (double)item, bin_id);
#endif /* DEBUG */
#else
#ifdef USE_TRUNCATE
        AC_exact[tot_AC_exact_count++] = (float)(var->datatype == DOUBLE ? a_x.d[i*BLK_SZ+j] : a_x.f[i*BLK_SZ+j]);
#else
        /* FIX: same "|" vs ":" ternary typo as above */
        AC_exact[tot_AC_exact_count++] = (var->datatype == DOUBLE ? a_x.d[i*BLK_SZ+j] : (double)a_x.f[i*BLK_SZ+j]);
#endif
#endif /* USE_QTABLE */
      }
    }
  }

#ifdef DEBUG
  printf("total AC_exact_count = %d\n", tot_AC_exact_count);
#endif

#ifdef TIME_DEBUG
  gettimeofday(&end_t, NULL);
  dct_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
  gettimeofday(&start_t, NULL);
#endif

  free(bin_maxes);

#ifdef DEBUG
  int bin_freq[NBINS+1] = {0};
  i=0;
  while (i < N) {
    bin_freq[(int)bin_index[i++]]++;
  }
  printf("i=%d\n", i);
  int sum = 0;
  printf("bin_freq: ");
  for (i=0; i<NBINS+1; i++) {
    printf("%d, ", bin_freq[i]);
    sum += bin_freq[i];
  }
  printf("sum=%d\n", sum);
#endif

#ifdef TIME_DEBUG
  gettimeofday(&end_t, NULL);
  DC_AC_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
  gettimeofday(&start_t, NULL);
#endif

  /* debug dump of the (uncompressed) bin index array.
   * FIX: element size was hard-coded to 1 byte; use sizeof(t_bin_id). */
  char bin_index_file[640];
  FILE *fp_index;
  sprintf(bin_index_file, "bin_index.bin");
  fp_index = fopen(bin_index_file, "wb");
  fwrite(bin_index, sizeof(t_bin_id), N, fp_index);
  fclose(fp_index);

#ifdef DEBUG
  printf("tot_AC_exact_count=%d\n", tot_AC_exact_count);
#ifdef USE_QTABLE
  printf("bin_index before compression = %lu\n", k*sizeof(t_bin_id));
#else
  printf("bin_index before compression = %lu\n", N*sizeof(t_bin_id));
#endif
#ifdef USE_TRUNCATE
  printf("DC before compression = %lu\n", nblk*sizeof(float));
  printf("AC_exact before compression = %lu\n", tot_AC_exact_count*sizeof(float));
#else
  printf("DC before compression = %lu\n", nblk*sizeof(double));
  printf("AC_exact before compression = %lu\n", tot_AC_exact_count*sizeof(double));
#endif
#endif

  pthread_t thread[3];
  pthread_attr_t attr; /* thread attributes (left at defaults) */
  /* set defaults (not all pthread implementations default to joinable) */
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);

  /* setup for compress */
  z_stream defstream[3];
  defstream[0].zalloc = Z_NULL;
  defstream[0].zfree = Z_NULL;
  defstream[0].opaque = Z_NULL;

  /* compress bin_index */
#ifdef USE_QTABLE
  uLong ucompSize_binindex = k*sizeof(t_bin_id);
#else
  uLong ucompSize_binindex = N*sizeof(t_bin_id);
#endif
  uLong compSize_binindex = compressBound(ucompSize_binindex);
  int windowBits = 14; /* smaller-than-default deflate window */
  deflateInit2(&defstream[0], 1, Z_DEFLATED, windowBits, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
  defstream[0].avail_in = ucompSize_binindex;
  defstream[0].next_in = (Bytef *)bin_index;
  defstream[0].avail_out = compSize_binindex;
  defstream[0].next_out = (Bytef *)bin_indexz;
  defstream[0].data_type = Z_UNKNOWN; /* Z_ASCII, Z_BINARY, Z_UNKNOWN */
  if (pthread_create(&thread[0], &attr, compress_thread, (void *)&defstream[0])) {
    fprintf(stderr, "Error creating thread\n");
    exit(0);
  }

  /* compress DC */
  defstream[1].zalloc = Z_NULL;
  defstream[1].zfree = Z_NULL;
  defstream[1].opaque = Z_NULL;
#ifdef USE_TRUNCATE
  uLong ucompSize_DC = nblk*sizeof(float);
  uLong compSize_DC = compressBound(ucompSize_DC);
#else
  uLong ucompSize_DC = nblk*sizeof(double);
  uLong compSize_DC = compressBound(ucompSize_DC);
#endif
  deflateInit2(&defstream[1], 1, Z_DEFLATED, windowBits, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
  defstream[1].avail_in = ucompSize_DC;
  defstream[1].next_in = (Bytef *)DC;
  defstream[1].avail_out = compSize_DC;
  defstream[1].next_out = (Bytef *)DCz;
  defstream[1].data_type = Z_UNKNOWN;
  if (pthread_create(&thread[1], &attr, compress_thread, (void *)&defstream[1])) {
    fprintf(stderr, "Error creating thread\n");
    exit(0);
  }

  /* compress AC_exact (whole N-element buffer, not just the used prefix,
   * to match the original stream layout) */
  defstream[2].zalloc = Z_NULL;
  defstream[2].zfree = Z_NULL;
  defstream[2].opaque = Z_NULL;
#ifdef USE_TRUNCATE
  uLong ucompSize_AC_exact = N*sizeof(float);
  uLong compSize_AC_exact = compressBound(ucompSize_AC_exact);
#else
  uLong ucompSize_AC_exact = N*sizeof(double);
  uLong compSize_AC_exact = compressBound(ucompSize_AC_exact);
#endif
  deflateInit2(&defstream[2], 1, Z_DEFLATED, windowBits, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
  defstream[2].avail_in = ucompSize_AC_exact;
  defstream[2].next_in = (Bytef *)AC_exact;
  defstream[2].avail_out = compSize_AC_exact;
  defstream[2].next_out = (Bytef *)AC_exactz;
  defstream[2].data_type = Z_UNKNOWN;
  if (pthread_create(&thread[2], &attr, compress_thread, (void *)&defstream[2])) {
    fprintf(stderr, "Error creating thread\n");
    exit(0);
  }

#ifdef USE_TRUNCATE
  //uLong ucompSize_AC_exact = tot_AC_exact_count*sizeof(float);
  // uLong compSize_AC_exact = compressBound(ucompSize_AC_exact);
#else
  //uLong ucompSize_AC_exact = tot_AC_exact_count*sizeof(double);
  // uLong compSize_AC_exact = compressBound(ucompSize_AC_exact);
#endif

  /* join the three workers; each returns its deflated size via pthread_exit */
  void *ret;
  for (i=0; i<3; i++) {
    pthread_join(thread[i], &ret);
#ifdef DEBUG
    printf("thread %d joined\n", i);
#endif
    switch (i) {
    case 0:
      compSize_binindex = (uLong)ret;
      break;
    case 1:
      compSize_DC = (uLong)ret;
      break;
    case 2:
      compSize_AC_exact = (uLong)ret;
      break;
    }
  }
  pthread_attr_destroy(&attr);

#if 0
  compSize_binindex = defstream[0].total_out; /* update with actual size */
  deflateEnd(&defstream[0]);
  compSize_DC = defstream[1].total_out; /* update with actual size */
  deflateEnd(&defstream[1]);
  compSize_AC_exact_count = defstream[2].total_out; /* update with actual size */
  deflateEnd(&defstream[2]);
#endif

  /* shrink the deflate buffers to the actual sizes (FIX: check realloc) */
  bin_indexz2 = (t_bin_id*)realloc(bin_indexz, compSize_binindex);
  if (NULL == bin_indexz2) {
    fprintf(stderr, "Out of memory: bin_indexz2\n");
    exit(1);
  }
#ifdef SIZE_DEBUG
  printf("Compressed bin_index size is: %lu\n", compSize_binindex);
#endif
  DCz2 = realloc(DCz, compSize_DC);
  if (NULL == DCz2) {
    fprintf(stderr, "Out of memory: DCz2\n");
    exit(1);
  }
#ifdef SIZE_DEBUG
  printf("Compressed DC size is: %lu\n", compSize_DC);
#endif
  AC_exactz2 = realloc(AC_exactz, compSize_AC_exact);
  if (NULL == AC_exactz2) {
    fprintf(stderr, "Out of memory: AC_exactz2\n");
    exit(1);
  }
#ifdef SIZE_DEBUG
  printf("Compressed AC_exact size is: %lu\n", compSize_AC_exact);
#endif

#ifdef TIME_DEBUG
  gettimeofday(&end_t, NULL);
  double comp_rate;
  zlib_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
  comp_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(gstart_t.tv_sec*1000000 + gstart_t.tv_usec));
  comp_rate = (N*sizeof(double)/(double)(1024*1024))/(comp_t/1000000);
  printf("sf_t=%f(s), dct_t=%f(s), zlib_t(compress)=%f(s)\n", sf_t/1000000, dct_t/1000000, zlib_t/1000000);
  printf("malloc_t=%f(s), genbin=%f(s), DC_AC_t=%f(s)\n", malloc_t/1000000, genbin_t/1000000, DC_AC_t/1000000);
  printf("comp_time = %f (s), compression rate = %f (MB/s)\n", comp_t/1000000, comp_rate);
#endif

  *outSize = sizeof(struct header) + compSize_binindex + compSize_DC + compSize_AC_exact;
#ifdef USE_QTABLE
  /* FIX: the raw quantizer table is appended to the output (memcpy below)
   * but was not counted in *outSize, truncating the last BLK_SZ doubles
   * for any consumer that writes exactly *outSize bytes */
  *outSize += BLK_SZ*sizeof(double);
#endif
  h.datatype = var->datatype;
  h.num_elements = N;
  h.error_bound = error_bound;
  h.tot_AC_exact_count = tot_AC_exact_count;
  if (var->datatype == DOUBLE)
    h.scaling_factor.d = bs.sf.d;
  else /* FLOAT */
    h.scaling_factor.f = bs.sf.f;
  h.bindex_sz_compressed = compSize_binindex;
  h.DC_sz_compressed = compSize_DC;
  h.AC_exact_sz_compressed = compSize_AC_exact;
#ifdef USE_QTABLE
  h.bindex_count = k;
#endif
  //h.AC_exact_count_sz_compressed = compSize_AC_exact_count;

  /* serialize: header | bin_index | DC | AC_exact [| qtable] */
  unsigned char *cur_p;
  if (var->datatype == DOUBLE)
    cur_p = (unsigned char *)(var_z->buf.d);
  else
    cur_p = (unsigned char *)(var_z->buf.f);
  memcpy(cur_p, &h, sizeof(struct header));
  cur_p += sizeof(struct header);
  memcpy(cur_p, bin_indexz2, compSize_binindex);
  cur_p += compSize_binindex;
  //memcpy (cur_p, AC_exact_countz2, compSize_AC_exact_count);
  //cur_p += compSize_AC_exact_count;
  memcpy(cur_p, DCz2, compSize_DC);
  cur_p += compSize_DC;
  memcpy(cur_p, AC_exactz2, compSize_AC_exact);
#ifdef USE_QTABLE
  cur_p += compSize_AC_exact;
  memcpy(cur_p, qtable, BLK_SZ*sizeof(double));
#endif /* USE_QTABLE */

  if (var->datatype == DOUBLE)
    free(a_x.d);
  else /* FLOAT */
    free(a_x.f);
  free(DC);
  free(DCz2);
  free(bin_center);
  //free(AC_exact_count);
  //free(AC_exact_countz2);
  free(AC_exact);
  free(AC_exactz2);
  free(bin_index);
  free(bin_indexz2);
#ifdef USE_QTABLE
  free(qtable);
#endif

#ifndef SIZE_DEBUG
  printf("outSize = %zu\n", *outSize);
#endif
  return(1);
}
|
grid_ao.c | /*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <complex.h>
#include "config.h"
#include "cint.h"
#include "vhf/fblas.h"
#include "gto/grid_ao_drv.h"
#define MIN(X,Y) ((X)<(Y)?(X):(Y))
#define MAX(X,Y) ((X)>(Y)?(X):(Y))
#define ALL_IMAGES 255
double CINTcommon_fac_sp(int l);
void GTOshell_eval_grid_cart(double *gto, double *ri, double *exps,
double *coord, double *alpha, double *coeff, double *env,
int l, int np, int nc, int nao, int ngrids, int bgrids);
void GTOshell_eval_grid_cart_deriv1(double *gto, double *ri, double *exps,
double *coord, double *alpha, double *coeff, double *env,
int l, int np, int nc, int nao, int ngrids, int bgrids);
void GTOshell_eval_grid_cart_deriv2(double *cgto, double *ri, double *exps,
double *coord, double *alpha, double *coeff, double *env,
int l, int np, int nc, int nao, int ngrids, int bgrids);
void GTOshell_eval_grid_cart_deriv3(double *cgto, double *ri, double *exps,
double *coord, double *alpha, double *coeff, double *env,
int l, int np, int nc, int nao, int ngrids, int bgrids);
void GTOshell_eval_grid_cart_deriv4(double *cgto, double *ri, double *exps,
double *coord, double *alpha, double *coeff, double *env,
int l, int np, int nc, int nao, int ngrids, int bgrids);
/*
* Extend the meaning of non0table: given shell ID and block ID,
* non0table is the number of images in Ls that does not vanish.
* Ls should be sorted based on the distance to center cell.
*/
/*
 * Build the periodic-AO screening table.  For each (grid block, shell) pair,
 * non0table[ib*nbas+bas_id] records how many lattice images in Ls contribute
 * non-negligibly (capped at ALL_IMAGES).  Per the note above, Ls is assumed
 * sorted by distance from the central cell, so the first surviving image
 * found when scanning from the farthest one bounds the count.
 *
 * non0table : out, [nblk x nbas] image counts (0 => shell vanishes on block)
 * coords    : grid coordinates, laid out [3 x ngrids] (x row, y row, z row)
 * Ls        : lattice translation vectors, [nimgs x 3]
 * atm/bas/env: libcint-style integral tables
 */
void PBCnr_ao_screen(unsigned char *non0table, double *coords, int ngrids,
                     double *Ls, int nimgs,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
#pragma omp parallel default(none) \
        shared(Ls, nimgs, coords, ngrids, non0table, atm, natm, bas, nbas, env)
{
        int ib, i, j, m;
        int np, nc, atm_id, bas_id;
        double rr, arr, maxc;
        double logcoeff[NPRIMAX];
        double dr[3];
        double rL[3];
        double *p_exp, *pcoeff, *ratm;
#pragma omp for nowait schedule(dynamic)
        for (bas_id = 0; bas_id < nbas; bas_id++) {
                np = bas[NPRIM_OF+bas_id*BAS_SLOTS];
                nc = bas[NCTR_OF +bas_id*BAS_SLOTS];
                p_exp = env + bas[PTR_EXP+bas_id*BAS_SLOTS];
                pcoeff = env + bas[PTR_COEFF+bas_id*BAS_SLOTS];
                atm_id = bas[ATOM_OF+bas_id*BAS_SLOTS];
                ratm = env + atm[atm_id*ATM_SLOTS+PTR_COORD];

                /* log of the largest contraction coefficient per primitive,
                 * so the cutoff test below works in log space */
                for (j = 0; j < np; j++) {
                        maxc = 0;
                        for (i = 0; i < nc; i++) {
                                maxc = MAX(maxc, fabs(pcoeff[i*np+j]));
                        }
                        logcoeff[j] = log(maxc);
                }

                for (ib = 0; ib < nblk; ib++) {
                /* scan images from farthest to nearest; the first image whose
                 * gaussian survives the EXPCUTOFF test on any grid point of
                 * this block fixes the number of contributing images */
                for (m = nimgs-1; m >= 0; m--) {
                        rL[0] = ratm[0] + Ls[m*3+0];
                        rL[1] = ratm[1] + Ls[m*3+1];
                        rL[2] = ratm[2] + Ls[m*3+2];
                        for (i = ib*BLKSIZE; i < MIN(ngrids, (ib+1)*BLKSIZE); i++) {
                                dr[0] = coords[0*ngrids+i] - rL[0];
                                dr[1] = coords[1*ngrids+i] - rL[1];
                                dr[2] = coords[2*ngrids+i] - rL[2];
                                rr = dr[0]*dr[0] + dr[1]*dr[1] + dr[2]*dr[2];
                                for (j = 0; j < np; j++) {
                                        arr = p_exp[j] * rr;
                                        /* exp(-arr)*coeff above cutoff? */
                                        if (arr-logcoeff[j] < EXPCUTOFF) {
                                                non0table[ib*nbas+bas_id] = MIN(ALL_IMAGES, m+1);
                                                goto next_blk;
                                        }
                                }
                        }
                }
                /* no image contributes on this block */
                non0table[ib*nbas+bas_id] = 0;
next_blk:;
                }
        }
}
}
/* Accumulate a real AO block ao0 into each k-point buffer, scaled by that
 * k-point's phase factor expLk[ik].  ao0 is [ncol x BLKSIZE]; the output
 * slab for k-point ik starts at out[ik]+off and has leading dimension
 * ngrids.  Only the first bgrids grid points of each column are touched. */
static void axpy(double complex **out, double *ao0, double complex *expLk,
                 int nkpts, size_t off, int ngrids, int bgrids, int ncol)
{
        int kp, col, g;
        double complex phase;
        double complex *dst;
        double *src;
        for (kp = 0; kp < nkpts; kp++) {
                phase = expLk[kp];
                dst = out[kp] + off;
                for (col = 0; col < ncol; col++) {
                        src = ao0 + col*BLKSIZE;
                        for (g = 0; g < bgrids; g++) {
                                dst[col*ngrids+g] += src[g] * phase;
                        }
                }
        }
}
/* Zero-initialize an [ncol x bgrids] slab (leading dimension ngrids) in
 * every k-point buffer, starting at out[ik]+off. */
static void set0(double complex **out,
                 int nkpts, size_t off, int ngrids, int bgrids, int ncol)
{
        int kp, col, g;
        double complex *dst;
        for (kp = 0; kp < nkpts; kp++) {
                dst = out[kp] + off;
                for (col = 0; col < ncol; col++, dst += ngrids) {
                        for (g = 0; g < bgrids; g++) {
                                dst[g] = 0;
                        }
                }
        }
}
// grid2atm[xyz,grid_id]
/* Fill grid2atm[atm_id][xyz][grid] with grid-point coordinates relative to
 * each atom translated by lattice vector L.  Only the first bgrids points
 * of each BLKSIZE-strided row are written. */
static void _fill_grid2atm(double *grid2atm, double *coord, double *L,
                           int bgrids, int ngrids,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
        int ia, ig;
        double cx, cy, cz;
        double *r;
        for (ia = 0; ia < natm; ia++, grid2atm += 3*BLKSIZE) {
                r = env + atm[PTR_COORD+ia*ATM_SLOTS];
                /* atom position shifted into image L */
                cx = r[0] + L[0];
                cy = r[1] + L[1];
                cz = r[2] + L[2];
                for (ig = 0; ig < bgrids; ig++) {
                        grid2atm[0*BLKSIZE+ig] = coord[0*ngrids+ig] - cx;
                        grid2atm[1*BLKSIZE+ig] = coord[1*ngrids+ig] - cy;
                        grid2atm[2*BLKSIZE+ig] = coord[2*ngrids+ig] - cz;
                }
        }
}
/*
 * Evaluate spherical AOs for one grid block (bgrids points starting at
 * output offset offao) of a periodic system: for every lattice image m the
 * cartesian GTOs are evaluated, transformed to spherical, and accumulated
 * into the per-k-point buffers ao[] with phase factor expLk[m*nkpts+ik].
 * Screening: image m of shell bas_id contributes only while
 * m < non0table[bas_id] (or non0table[bas_id] == ALL_IMAGES).
 */
void PBCeval_sph_iter(void (*feval)(), int (*fexp)(),
                      int nao, int ngrids, int bgrids, size_t offao,
                      int param[], int *shls_slice, int *ao_loc, double *buf,
                      double *Ls, int nimgs, double complex *expLk, int nkpts,
                      double complex **ao, double *coord, unsigned char *non0table,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ncomp = param[TENSOR];
        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        const int atmstart = bas[sh0*BAS_SLOTS+ATOM_OF];
        const int atmend = bas[(sh1-1)*BAS_SLOTS+ATOM_OF]+1;
        const int atmcount = atmend - atmstart;
        const size_t Ngrids = ngrids;
        int i, k, l, m, np, nc, atm_id, bas_id, deg, dcart, di, ao_id;
        size_t off;
        double fac;
        double *p_exp, *pcoeff, *pcoord, *pcart, *ri, *pao;
        /* scratch layout inside buf (sized by the caller) */
        double *grid2atm = buf; // [atm_id,xyz,grid]
        double *eprim = grid2atm + atmcount*3*BLKSIZE;
        double *cart_gto = eprim + NPRIMAX*BLKSIZE*2;
        double *aobuf = cart_gto + BLKSIZE*NCTR_CART*ncomp*param[POS_E1];

        /* zero the output slab of every tensor component before accumulating
         * over images.
         * BUGFIX: the original computed 'off' but passed 'offao' to set0, so
         * each component zeroed the SAME region (component 0's), leaving the
         * others uninitialized; 'off' also mixed in the absolute ao_loc[sh0]
         * although axpy below indexes relative to sh0 (via ao_id). */
        for (i = 0; i < ncomp; i++) {
                off = (size_t)i * nao * Ngrids + offao;
                set0(ao, nkpts, off, ngrids, bgrids, ao_loc[sh1]-ao_loc[sh0]);
        }

        for (m = 0; m < nimgs; m++) {
                _fill_grid2atm(grid2atm, coord, Ls+m*3, bgrids, ngrids,
                               atm+atmstart*ATM_SLOTS, atmcount, bas, nbas, env);

                for (bas_id = sh0; bas_id < sh1; bas_id++) {
                        np = bas[bas_id*BAS_SLOTS+NPRIM_OF];
                        nc = bas[bas_id*BAS_SLOTS+NCTR_OF ];
                        l  = bas[bas_id*BAS_SLOTS+ANG_OF  ];
                        deg = l * 2 + 1;                /* spherical components */
                        fac = CINTcommon_fac_sp(l);
                        p_exp  = env + bas[bas_id*BAS_SLOTS+PTR_EXP];
                        pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF];
                        atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                        pcoord = grid2atm + (atm_id - atmstart) * 3*BLKSIZE;
                        ao_id = ao_loc[bas_id] - ao_loc[sh0];
                        /* fexp also prescreens the contracted exponentials */
                        if ((m < non0table[bas_id] || non0table[bas_id] == ALL_IMAGES) &&
                            (*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, bgrids, fac)) {
                                dcart = (l+1)*(l+2)/2;  /* cartesian components */
                                ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS];
                                if (l <= 1) { // s, p functions: cart == sph
                                        (*feval)(aobuf, ri, eprim, pcoord, p_exp, pcoeff, env,
                                                 l, np, nc, nc*dcart, BLKSIZE, bgrids);
                                } else {
                                        (*feval)(cart_gto, ri, eprim, pcoord, p_exp, pcoeff, env,
                                                 l, np, nc, nc*dcart, bgrids, bgrids);
                                        /* cartesian -> spherical per contraction and component */
                                        pcart = cart_gto;
                                        pao = aobuf;
                                        for (i = 0; i < ncomp; i++) {
                                        for (k = 0; k < nc; k++) {
                                                CINTc2s_ket_sph1(pao, pcart, BLKSIZE, bgrids, l);
                                                pao += deg * BLKSIZE;
                                                pcart += dcart * bgrids;
                                        } }
                                }
                                di = nc * deg;
                                for (i = 0; i < ncomp; i++) {
                                        off = (i*nao+ao_id)*Ngrids + offao;
                                        pao = aobuf + i*di*BLKSIZE;
                                        axpy(ao, pao, expLk+m*nkpts, nkpts, off, ngrids, bgrids, di);
                                }
                        }
                }
        }
}
int GTOshloc_by_atom(int *shloc, int *shls_slice, int *ao_loc, int *atm, int *bas);
/*
* blksize <= 1024 to avoid stack overflow
*
* non0table[ngrids/blksize,natm] is the T/F table for ao values to
* screen the ao evaluation for each shell
*/
/*
 * Driver loop: partition the grid into BLKSIZE blocks and the shell range
 * into per-atom shell blocks (GTOshloc_by_atom), then dispatch every
 * (shell-block, grid-block) pair to fiter (e.g. PBCeval_sph_iter) from an
 * OpenMP worker.  Each worker owns a private scratch buffer sized for one
 * grid block; see the blksize note above (<= 1024 to avoid stack overflow).
 */
void PBCeval_loop(void (*fiter)(), void (*feval)(), int (*fexp)(),
                  int ngrids, int param[], int *shls_slice, int *ao_loc,
                  double *Ls, int nimgs, double complex *expLk, int nkpts,
                  double complex **ao, double *coord, unsigned char *non0table,
                  int *atm, int natm, int *bas, int nbas, double *env)
{
        /* shloc: start shell of each per-atom shell block (VLA upper bound) */
        int shloc[shls_slice[1]-shls_slice[0]+1];
        const int nshblk = GTOshloc_by_atom(shloc, shls_slice, ao_loc, atm, bas);
        const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
        const size_t Ngrids = ngrids;
#pragma omp parallel default(none) \
        shared(fiter, feval, fexp, param, ngrids, \
               Ls, nimgs, expLk, nkpts, shls_slice, ao_loc, \
               ao, coord, non0table, atm, natm, bas, nbas, env, shloc)
{
        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        const int nao = ao_loc[sh1] - ao_loc[sh0];
        int ip, ib, k, iloc, ish;
        size_t aoff;
        /* per-thread scratch; ncart bounds the cartesian GTO buffer size */
        int ncart = NCTR_CART * param[TENSOR] * param[POS_E1];
        double *buf = malloc(sizeof(double) * BLKSIZE*(NPRIMAX*2+ncart*2));
#pragma omp for nowait schedule(static)
        /* flattened 2D loop over (shell block iloc, grid block ib) */
        for (k = 0; k < nblk*nshblk; k++) {
                iloc = k / nblk;
                ish = shloc[iloc];
                ib = k - iloc * nblk;
                ip = ib * BLKSIZE;
                /* output offset of this shell block / grid block pair,
                 * relative to ao_loc[sh0] */
                aoff = (ao_loc[ish] - ao_loc[sh0]) * Ngrids + ip;
                (*fiter)(feval, fexp, nao, ngrids, MIN(ngrids-ip, BLKSIZE), aoff,
                         param, shloc+iloc, ao_loc, buf, Ls, nimgs, expLk, nkpts,
                         ao, coord+ip, non0table+ib*nbas,
                         atm, natm, bas, nbas, env);
        }
        free(buf);
}
}
/*
 * Spherical-harmonic entry point: forwards to PBCeval_loop with the
 * spherical per-block iterator (PBCeval_sph_iter), keeping the cartesian
 * evaluator feval and the exponent/contraction screen fexp supplied by
 * the caller.
 */
void PBCeval_sph_drv(void (*feval)(), int (*fexp)(),
                     int ngrids, int param[], int *shls_slice, int *ao_loc,
                     double *Ls, int nimgs, double complex *expLk, int nkpts,
                     double complex **ao, double *coord, unsigned char *non0table,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        PBCeval_loop(PBCeval_sph_iter, feval, fexp,
                     ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                     ao, coord, non0table, atm, natm, bas, nbas, env);
}
/*
 * Periodic spherical AO values on grids, no derivatives: selects the
 * order-0 cartesian evaluator and contraction kernel, one output component
 * per AO (param read as param[TENSOR] by the iterator).
 */
void PBCval_sph_deriv0(int ngrids, int *shls_slice, int *ao_loc,
                       double *Ls, int nimgs, double complex *expLk, int nkpts,
                       double complex **ao, double *coord, unsigned char *non0table,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[2];
        param[0] = 1;
        param[1] = 1;   /* 1 component: the AO value itself */
        PBCeval_sph_drv(GTOshell_eval_grid_cart, GTOcontract_exp0,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk,
                        nkpts, ao, coord, non0table, atm, natm, bas, nbas, env);
}
/*
 * Periodic spherical AO values plus first derivatives: 4 output components
 * per AO (presumably value, d/dx, d/dy, d/dz — matching param[1] = 4).
 */
void PBCval_sph_deriv1(int ngrids, int *shls_slice, int *ao_loc,
                       double *Ls, int nimgs, double complex *expLk, int nkpts,
                       double complex **ao, double *coord, unsigned char *non0table,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[2];
        param[0] = 1;
        param[1] = 4;   /* 4 components up to 1st derivatives */
        PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv1, GTOcontract_exp1,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk,
                        nkpts, ao, coord, non0table, atm, natm, bas, nbas, env);
}
/*
 * Periodic spherical AO values with derivatives up to 2nd order:
 * 10 output components per AO (param[1] = 10).
 */
void PBCval_sph_deriv2(int ngrids, int *shls_slice, int *ao_loc,
                       double *Ls, int nimgs, double complex *expLk, int nkpts,
                       double complex **ao, double *coord, unsigned char *non0table,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[2];
        param[0] = 1;
        param[1] = 10;  /* components up to 2nd derivatives */
        PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv2, GTOprim_exp,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk,
                        nkpts, ao, coord, non0table, atm, natm, bas, nbas, env);
}
/*
 * Periodic spherical AO values with derivatives up to 3rd order:
 * 20 output components per AO (param[1] = 20).
 */
void PBCval_sph_deriv3(int ngrids, int *shls_slice, int *ao_loc,
                       double *Ls, int nimgs, double complex *expLk, int nkpts,
                       double complex **ao, double *coord, unsigned char *non0table,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[2];
        param[0] = 1;
        param[1] = 20;  /* components up to 3rd derivatives */
        PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv3, GTOprim_exp,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk,
                        nkpts, ao, coord, non0table, atm, natm, bas, nbas, env);
}
/*
 * Periodic spherical AO values with derivatives up to 4th order:
 * 35 output components per AO (param[1] = 35).
 */
void PBCval_sph_deriv4(int ngrids, int *shls_slice, int *ao_loc,
                       double *Ls, int nimgs, double complex *expLk, int nkpts,
                       double complex **ao, double *coord, unsigned char *non0table,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[2];
        param[0] = 1;
        param[1] = 35;  /* components up to 4th derivatives */
        PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv4, GTOprim_exp,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk,
                        nkpts, ao, coord, non0table, atm, natm, bas, nbas, env);
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two struct timeval values.
 * Note: *y is normalized in place during the computation (as in the
 * classic GNU libc example this follows).
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  int carry;

  /* Borrow whole seconds into y so x->tv_usec - y->tv_usec cannot
   * go negative. */
  if (x->tv_usec < y->tv_usec)
  {
    carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  /* Move any surplus whole seconds of microseconds into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }

  /* After normalization tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Negative difference iff x's (adjusted) seconds are smaller. */
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 24;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,4);t1++) {
lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-5,6)),ceild(8*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(4*t1+Ny+5,24)),floord(8*t2+Ny+4,24)),floord(8*t1-8*t2+Nz+Ny+3,24));t3++) {
for (t4=max(max(max(0,ceild(t1-7,8)),ceild(8*t2-Nz-28,32)),ceild(24*t3-Ny-28,32));t4<=min(min(min(min(floord(Nt+Nx-4,32),floord(4*t1+Nx+5,32)),floord(8*t2+Nx+4,32)),floord(24*t3+Nx+20,32)),floord(8*t1-8*t2+Nz+Nx+3,32));t4++) {
for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),24*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),24*t3+22),32*t4+30),8*t1-8*t2+Nz+5);t5++) {
for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
lbv=max(32*t4,t5+1);
ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
defaultmap-1.c | void
foo (void)
{
#pragma omp target defaultmap(alloc) defaultmap(alloc) /* { dg-error "too many 'defaultmap' clauses with unspecified category" } */
;
#pragma omp target defaultmap(to) defaultmap(from) /* { dg-error "too many 'defaultmap' clauses with unspecified category" } */
;
#pragma omp target defaultmap(tofrom) defaultmap(firstprivate:scalar) /* { dg-error "too many 'defaultmap' clauses with 'scalar' category" } */
;
#pragma omp target defaultmap(none:aggregate) defaultmap(alloc:scalar) defaultmap(none:scalar) /* { dg-error "too many 'defaultmap' clauses with 'scalar' category" } */
;
#pragma omp target defaultmap(none : pointer) defaultmap ( none ) /* { dg-error "too many 'defaultmap' clauses with 'pointer' category" } */
;
#pragma omp target defaultmap() /* { dg-error "expected" } */
;
#pragma omp target defaultmap(for) /* { dg-error "expected" } */
;
#pragma omp target defaultmap(blah) /* { dg-error "expected" } */
;
#pragma omp target defaultmap(tofrom:) /* { dg-error "expected" } */
;
#pragma omp target defaultmap(tofrom scalar) /* { dg-error "expected" } */
;
#pragma omp target defaultmap(tofrom,scalar) /* { dg-error "expected" } */
;
#pragma omp target defaultmap(default ;) /* { dg-error "expected" } */
;
#pragma omp target defaultmap(default : qux) /* { dg-error "expected" } */
;
}
|
server.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <omp.h>
#include "names.h"
#include "database.h"
/* Creation new socket for current thread */
/* Create and bind a datagram UNIX-domain socket for worker thread `thr`.
 *
 * The socket path is SERVER_SOCKET_FILE with the thread number appended,
 * so every worker thread listens on its own filesystem path.
 *
 * Returns the socket descriptor on success, -1 on failure.
 */
int create_soket(int thr)
{
    struct sockaddr_un sock_addr;
    int sockfd;

    sockfd = socket(PF_UNIX, SOCK_DGRAM, 0);
    if (sockfd < 0) {
        perror("Socket creation failed!\n");
        return -1;
    }
    /* Zero the whole address first: sockaddr_un may contain padding and
     * bind() stores/compares the full structure. */
    memset(&sock_addr, 0, sizeof(sock_addr));
    sock_addr.sun_family = AF_UNIX;
    /* snprintf instead of sprintf: sun_path is a small fixed-size buffer
     * (typically 108 bytes) and must never be overrun. */
    snprintf(sock_addr.sun_path, sizeof(sock_addr.sun_path), "%s%d",
             SERVER_SOCKET_FILE, thr);
    /* Remove any stale socket file from a previous run. */
    unlink(sock_addr.sun_path);
    if (bind(sockfd, (struct sockaddr *) &sock_addr,
             sizeof(sock_addr)) < 0) {
        perror("Socket binding failed!\n");
        close(sockfd);
        return -1;
    }
    return sockfd;
}
/* Wrappers for call database functions */
/* Call of DB functions and send of ansvers to client */
/* Dispatch one decoded client command to the database and send the
 * reply datagram(s) back to the client at *addr.
 *
 * c_type selects the operation (C_PUT/C_GET/C_LIST/C_ERACE/C_EXIT);
 * arg[] holds the already-received command arguments.  C_EXIT closes
 * the database, acknowledges, and terminates the whole process. */
void server_functs(struct mydb_t *db, char c_type, char **arg, int sockfd, struct sockaddr_un *addr, socklen_t addr_len)
{
    switch (c_type) {
    case C_PUT:
    {
        /* Single status byte: 0 = success, 1 = failure. */
        char res = mydb_put(db, arg[0], arg[1]) ? 0 : 1;
        sendto(sockfd, &res, 1, 0, (struct sockaddr *) addr, addr_len);
        break;
    }
    case C_GET:
    {
        const char *val = mydb_get(db, arg[0]);
        /* First tell the client whether a value exists (0 or 1). */
        int count = !val ? 0 : 1;
        sendto(sockfd, &count, sizeof(int), 0,
            (struct sockaddr *) addr, addr_len);
        if (count) {
            /* Then send the value length (incl. NUL) and the value. */
            int buf_size = strlen(val) + 1;
            sendto(sockfd, &buf_size, sizeof(int), 0,
                (struct sockaddr *) addr, addr_len);
            sendto(sockfd, val, buf_size, 0,
                (struct sockaddr *) addr, addr_len);
        }
        break;
    }
    case C_LIST:
    {
        size_t count;
        const char **list = mydb_list(db, &count);
        /* BUG FIX: the client reads an int, but `count` is a size_t;
         * sending sizeof(int) bytes out of a wider object transmits
         * only part of its representation and is byte-order dependent.
         * Send an int-typed copy instead. */
        int count_out = (int) count;
        sendto(sockfd, &count_out, sizeof(int), 0,
            (struct sockaddr *) addr, addr_len);
        for (size_t i = 0; i < count; i++) {
            int buf_size = strlen(list[i]) + 1;
            sendto(sockfd, &buf_size, sizeof(int), 0,
                (struct sockaddr *) addr, addr_len);
            sendto(sockfd, list[i], buf_size, 0,
                (struct sockaddr *) addr, addr_len);
        }
        free(list);
        break;
    }
    case C_ERACE:
    {
        char res = mydb_erase(db, arg[0]) ? 0 : 1;
        sendto(sockfd, &res, 1, 0, (struct sockaddr *) addr, addr_len);
        break;
    }
    case C_EXIT:
    {
        /* Close the DB, acknowledge, and terminate the server process. */
        mydb_close(db);
        char res = 1;
        sendto(sockfd, &res, 1, 0, (struct sockaddr *) addr, addr_len);
        if (sockfd >= 0)
            close(sockfd);
        exit(EXIT_SUCCESS);
    }
    }
}
/* Report whether the command only reads the database (GET or LIST);
 * returns 1 for read-only commands, 0 for mutating ones. */
int is_read(const char c_type)
{
    return (c_type == C_GET) || (c_type == C_LIST);
}
int main(int argc, const char *argv[])
{
    /* Database init */
    struct mydb_t *db = mydb_init(FILE_DATA, FILE_MDATA, 1);
    if (!db) {
        printf("DB not created\n");
        return 1;
    }
    /* `lock` serializes writers against each other; `read_f` counts
     * currently-running readers (readers do not hold the lock while
     * working). */
    omp_lock_t lock;
    omp_init_lock(&lock);
    /* Count of working read functions at the moment */
    int read_f = 0;
    /* Init of parallel region: one worker thread per socket, each thread
     * serves its own datagram socket forever. */
    #pragma omp parallel num_threads(SERVER_NUM_THREADS)
    {
        int thr = omp_get_thread_num();
        /* Server init */
        int sockfd ;
        if ((sockfd = create_soket(thr)) < 0) {
            exit(EXIT_FAILURE);
        }
        struct sockaddr_un from_addr;
        socklen_t sockaddr_len = sizeof(struct sockaddr_un);
        char c_type, ibuf[20], *arg[2];
        int arg_len[2];
        for(;;) {
            /* Get info about future request.
             * NOTE(review): the recvfrom() return value is not checked;
             * a short or failed read leaves ibuf stale. */
            recvfrom(sockfd, ibuf, 20, 0,
                (struct sockaddr *) &from_addr, &sockaddr_len);
            /* Decode the info: header layout is
             * [0] command byte, [1..4] length of arg 0, [5..8] length of
             * arg 1 (host byte order, as produced by the client). */
            c_type = (unsigned char) ibuf[0];
            arg_len[0] = *((int*)&ibuf[1]);
            arg_len[1] = *((int*)&ibuf[1 + sizeof(int)]);
            /* Get request: one datagram per argument with length > 0. */
            for (int i = 0; i < 2 && arg_len[i] > 0; i++) {
                arg[i] = malloc(arg_len[i]);
                recvfrom(sockfd, arg[i], arg_len[i], 0,
                    (struct sockaddr *) &from_addr, &sockaddr_len);
            }
            /* Request to DB */
            omp_set_lock(&lock);
            if (is_read(c_type)) {
                /* If it's just read function then free the lock */
                omp_unset_lock(&lock);
                /* Increment the count of working read functions.
                 * NOTE(review): the unlock happens before read_f++, so a
                 * writer can observe read_f == 0 in that window and run
                 * concurrently with this reader -- confirm whether that
                 * race is acceptable here. */
                #pragma omp atomic
                read_f++;
            } else {
                /* If it's write function then
                 * wait for complete all of read function
                 */
                /* NOTE(review): sleep(10) polls every 10 SECONDS while
                 * holding the write lock; read_f is also read without an
                 * atomic here -- presumably intentional best-effort
                 * polling, verify. */
                while (read_f)
                    sleep(10);
            }
            server_functs(db, c_type, arg, sockfd,
                &from_addr, sockaddr_len);
            if (is_read(c_type)) {
                /* Decrement the count of working read functions */
                #pragma omp atomic
                read_f--;
            } else {
                /* Free the lock */
                omp_unset_lock(&lock);
            }
            /* Release the argument buffers received for this request. */
            for (int i = 0; i < 2 && arg_len[i] > 0; i++)
                free(arg[i]);
        }
    }
    return EXIT_SUCCESS;
}
|
primes.c | /*
* primes.c: Example of prime numbers counting in OpenMP.
*
* (C) 2015 Mikhail Kurnosov <mkurnosov@gmail.com>
*/
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
const int a = 1;
const int b = 10000000;
/* Current wall-clock time in seconds, with microsecond resolution. */
double wtime()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1E-6;
}
/*
* is_prime_number: Returns 1 if n is a prime number and 0 otherwise.
* This function uses trial division primality test.
*/
/*
 * is_prime_number: Returns 1 if n is a prime number and 0 otherwise.
 * This function uses trial division primality test.
 *
 * Fixes in this revision:
 *  - n < 2 (including negatives) now returns 0 without calling sqrt();
 *    sqrt() of a negative yields NaN and converting NaN to int is
 *    undefined behavior.
 *  - n == 2 now correctly reports prime; the old `limit = sqrt(n) + 1`
 *    bound made the loop test 2 % 2 and return 0 for n == 2.  (Callers
 *    in this file only pass odd n >= 3, so their behavior is unchanged.)
 */
int is_prime_number(int n)
{
    if (n < 2)
        return 0;
    /* i*i <= n avoids floating point entirely; long long prevents
     * overflow of i*i near INT_MAX. */
    for (long long i = 2; i * i <= n; i++) {
        if (n % i == 0)
            return 0;
    }
    return 1;
}
/*
 * count_prime_numbers: Counts the primes in the closed interval [a, b]
 * by trial division (2 is special-cased, then only odd candidates are
 * tested).
 *
 * Fix: ranges that end below 2 or are empty now return 0; previously
 * a call such as count_prime_numbers(1, 1) returned 1 because '2' was
 * counted unconditionally whenever a <= 2.
 */
int count_prime_numbers(int a, int b)
{
    int nprimes = 0;
    /* Empty range, or range entirely below the first prime. */
    if (b < 2 || a > b)
        return 0;
    /* Count '2' as a prime number */
    if (a <= 2) {
        nprimes = 1;
        a = 2;
    }
    /* Shift 'a' to odd number */
    if (a % 2 == 0)
        a++;
    /* Loop over odd numbers: a, a + 2, a + 4, ... , b */
    for (int i = a; i <= b; i += 2) {
        if (is_prime_number(i))
            nprimes++;
    }
    return nprimes;
}
/*
 * count_prime_numbers_omp: OpenMP version of count_prime_numbers();
 * counts the primes in [a, b].  Each thread accumulates into a private
 * counter which is merged atomically at the end.
 *
 * Fix: ranges that end below 2 or are empty now return 0; previously
 * e.g. count_prime_numbers_omp(1, 1) returned 1 because '2' was counted
 * unconditionally whenever a <= 2.
 */
int count_prime_numbers_omp(int a, int b)
{
    int nprimes = 0;
    /* Empty range, or range entirely below the first prime. */
    if (b < 2 || a > b)
        return 0;
    /* Count '2' as a prime number */
    if (a <= 2) {
        nprimes = 1;
        a = 2;
    }
    /* Shift 'a' to odd number */
    if (a % 2 == 0)
        a++;
    #pragma omp parallel
    {
        int nloc = 0;
        /* Loop over odd numbers: a, a + 2, a + 4, ... , b */
        #pragma omp for
        for (int i = a; i <= b; i += 2) {
            if (is_prime_number(i))
                nloc++;
        }
        /* Merge this thread's local count into the shared total. */
        #pragma omp atomic
        nprimes += nloc;
    }
    return nprimes;
}
/* Time the sequential prime count over [a, b], print its result, and
 * return the elapsed wall-clock time in seconds. */
double run_serial()
{
    const double started = wtime();
    const int found = count_prime_numbers(a, b);
    const double elapsed = wtime() - started;
    printf("Result (serial): %d\n", found);
    return elapsed;
}
/* Time the OpenMP prime count over [a, b], print its result, and
 * return the elapsed wall-clock time in seconds. */
double run_parallel()
{
    const double started = wtime();
    const int found = count_prime_numbers_omp(a, b);
    const double elapsed = wtime() - started;
    printf("Result (parallel): %d\n", found);
    return elapsed;
}
/* Driver: run the serial and parallel counters over the same interval
 * and report both timings plus the resulting speedup. */
int main(int argc, char **argv)
{
    printf("Count prime numbers on [%d, %d]\n", a, b);
    const double t_serial = run_serial();
    const double t_parallel = run_parallel();
    printf("Execution time (serial): %.6f\n", t_serial);
    printf("Execution time (parallel): %.6f\n", t_parallel);
    printf("Speedup: %.2f\n", t_serial / t_parallel);
    return 0;
}
|
zlauum.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_lauum
*
* Computes the product U * U^H or L^H * L, where the triangular
* factor U or L is stored in the upper or lower triangular part of
* the array A.
*
* If uplo = 'U' or 'u' then the upper triangle of the result is stored,
* overwriting the factor U in A.
* If uplo = 'L' or 'l' then the lower triangle of the result is stored,
* overwriting the factor L in A.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in,out] pA
* On entry, the triangular factor U or L.
* On exit, if UPLO = 'U', the upper triangle of A is
* overwritten with the upper triangle of the product U * U^H;
* if UPLO = 'L', the lower triangle of A is overwritten with
* the lower triangle of the product L^H * L.
* The diagonal is assumed to be real with no imaginary part.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit.
* @retval < 0 if -i, the i-th argument had an illegal value.
*
*******************************************************************************
*
* @sa plasma_clauum
* @sa plasma_dlauum
* @sa plasma_slauum
*
******************************************************************************/
/* Computes U*U^H (uplo = PlasmaUpper) or L^H*L (uplo = PlasmaLower) in
 * place over the triangular factor stored in pA; see the Doxygen block
 * above for the full contract.  Returns PlasmaSuccess or a negative
 * value identifying the offending argument. */
int plasma_zlauum(plasma_enum_t uplo, int n,
                  plasma_complex64_t *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (uplo != PlasmaUpper && uplo != PlasmaLower) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -4;
    }

    // quick return
    if (imax(n, 0) == 0)
        return PlasmaSuccess;

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        // BUG FIX: the tile matrix A was already created above and must
        // be released on this early-exit path to avoid leaking it.
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;

    // Asynchronous block.
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, sequence, &request);

        // Call the tile async function.
        plasma_omp_zlauum(uplo, A, sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(A, pA, lda, sequence, &request);
    }
    // Implicit synchronization.

    // Free matrix A in tile layout.
    plasma_desc_destroy(&A);

    // Return status.
    int status = sequence->status;
    plasma_sequence_destroy(sequence);
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_lauum
* Computes the product U * U^H or L^H * L, where the
* triangular factor U or L is stored in the upper or lower triangular part of
* the array A.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
*
* @param[in] A
* Descriptor of matrix A.
*
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_zlauum
* @sa plasma_omp_zlauum
* @sa plasma_omp_dlauum
* @sa plasma_omp_clauum
* @sa plasma_omp_slauum
*
******************************************************************************/
void plasma_omp_zlauum(plasma_enum_t uplo, plasma_desc_t A,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // NOTE(review): when sequence is NULL the failure is still reported
    // via plasma_request_fail(sequence, ...); presumably that helper
    // tolerates a NULL sequence -- confirm, otherwise this path
    // dereferences NULL.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Quick return
    if (A.n == 0)
        return;

    // Call the parallel function.
    plasma_pzlauum(uplo, A, sequence, request);
}
|
sumProductSparseC.c | #include <mex.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
/* MEX entry point: forward-backward (sum-product) message passing over
 * boundary positions in one OCT B-Scan, parallelized over columns with
 * OpenMP.  Inputs (prhs): appearance predictions, shape-prior means mu,
 * shape-prior loadings WML, noise variance sigmaML, column indices idxA,
 * a precomputed exp() hash table, and per-column row bounds.  Output
 * (plhs[0]): 3-D array of posterior marginals gamma.
 *
 * Fix in this revision: the message-width estimate used C's integer
 * abs() on a double expression, truncating it (and invoking UB for huge
 * values); it now uses fabs(). */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
    double* prediction = mxGetPr(prhs[0]); /* containts evaluated appearance models for all pixel in that BScan and all boundary models */
    double* mu = mxGetPr(prhs[1]);
    double* WML = mxGetPr(prhs[2]);
    double sigmaML = (double) mxGetScalar(prhs[3]);
    int* idxA = (int*) mxGetData(prhs[4]);
    double *hashTable = mxGetPr(prhs[5]);
    int* boundsPred = (int*) mxGetData(prhs[6]);
    /* int* idxRows = (int*) mxGetPr(prhs[4]); */
    int M = mxGetN(prhs[2]); /* number of eigenmodes of the shape prior */
    int N = mxGetM(prhs[2]); /* number of rows in WML */
    int numColumns = mxGetNumberOfElements(prhs[4]); /* for each column in the shape prior model there is one index */
    const int* dims = mxGetDimensions(prhs[0]);
    int numBounds = dims[1]; /* number of columns of pObs */
    int numRows = mxGetM(prhs[0]); /* number of rows of pObs */
    int numColumnsShape = (int) (double)N/(double)numBounds; /* for each column in the shape prior model there is one index */
    /* only calculate pairiwse probabilities up to this precision */
    double eps = pow(10,-25);
    double* gamma = NULL;
    /* return the probabilities for this B-Scan column */
    /* NOTE(review): mxCreateNumericArray expects mwSize dims; int works
     * only under the 32-bit compatibility API -- confirm build flags. */
    int dimArray[3] = {numRows,numBounds,numColumns};
    plhs[0] = mxCreateNumericArray(3,dimArray,mxDOUBLE_CLASS,mxREAL);
    gamma = mxGetPr(plhs[0]);
    #pragma omp parallel
    {
        /* Per-thread scratch buffers: forward messages alpha, backward
         * messages beta, normalizers c, and pairwise precisions prec. */
        double var_a_b, factor, mu_a, mu_b, prec_a_b, prec_a_a, evalDens, aTilde;
        double variance, varInv;
        double* prec = malloc(2*(numBounds-1)*sizeof(double));
        double var1, var2, var3;
        double* alpha = malloc(numBounds*numRows*sizeof(double));
        double* beta = malloc(numBounds*numRows*sizeof(double));
        double* c = malloc(numBounds*sizeof(double));
        double cInv, cTmp;
        double* prodTmp = malloc(numRows*sizeof(double));
        int rowStart, rowEnd;
        int boundA, boundB;
        int numNotZero;
        int i,k,idx,column,predOffset;
        int muFloor, startVal, stopVal;
        double mu_b_a, mu_a_b, tmpVal, prec_a_aaTilde;
        #pragma omp for
        for (column=0; column < numColumns; column++) {
            /* for (column = 0; column < 1; column++) {*/
            rowStart = boundsPred[column*2]; rowEnd = boundsPred[column*2+1];
            memset(alpha,0,numRows*numBounds*sizeof(double));
            memset(beta,0,numRows*numBounds*sizeof(double));
            predOffset = numBounds*numRows*column; /* shifts the pointer inside the prediction matrix to the next BScan column */
            /* calculate for the first boundary; calculate the 1-d shape marginal p(a) for column j on the fly */
            /* calculate the variance for the shape prior density: \Sigma = WW^T + sigma^2I */
            variance = 0;
            for (i=0; i < M; i++) {
                variance += WML[idxA[column] + i*N]*WML[idxA[column] + i*N];
            }
            variance += sigmaML; varInv = -0.5/variance;
            factor = 1/sqrt(2*3.1415926535897*variance);
            cTmp = 0;
            for (i=rowStart; i < rowEnd+1; i++) {
                alpha[i] = factor*exp(varInv*(i+1 - mu[idxA[column]])*(i+1-mu[idxA[column]]))*prediction[i + predOffset];
                cTmp += alpha[i];
            }
            c[0] = cTmp; cInv = 1/cTmp;
            for (i=rowStart; i < rowEnd+1; i++) {
                alpha[i] = alpha[i]*cInv;
            }
            /* calculate the precision matrices required for conditional densities p(a|b) */
            for (k=0; k < numBounds-1; k++) {
                var1 = 0; var2 = 0; var3 = 0;
                for (i=0; i < M; i++) {
                    var1 += WML[idxA[column] + numColumnsShape*k + i*N]*WML[idxA[column] + numColumnsShape*k + i*N];
                    var2 += WML[idxA[column] + numColumnsShape*k + i*N]*WML[idxA[column] + numColumnsShape*(k+1) + i*N];
                    var3 += WML[idxA[column] + numColumnsShape*(k+1) + i*N]*WML[idxA[column] + numColumnsShape*(k+1) + i*N];
                }
                var1 += sigmaML;
                var3 += sigmaML;
                factor = 1/(var1*var3 - var2*var2);
                prec[0 + k*2] = factor*(-var2);
                prec[1 + k*2] = factor*var1;
            }
            /* calculate the remaining boundaries (forward pass) */
            for (k=1; k < numBounds; k++) {
                /* calculate parameters of distribution p(b|a); b is the boundary k-1; a is boundary k */
                var_a_b = 1/prec[1 + (k-1)*2]; /* for the conditional density, variance is given by the inverse precision matrix */
                factor = 1/sqrt(2*3.1415926535897*var_a_b);
                /* calulate the number of elements larger than eps for each row */
                cTmp = 0;
                mu_b = mu[idxA[column] + numColumnsShape*(k-1)]; mu_a = mu[idxA[column] + numColumnsShape*k]; prec_a_b = prec[0 + (k-1)*2]; prec_a_a = prec[1 + (k-1)*2]; aTilde = prec_a_b/prec_a_a;
                /* BUG FIX: was abs() (integer absolute value), which
                 * truncated the double argument before ceil(). */
                numNotZero = (int) ceil(fabs(sqrt(-log(eps*factor)*2*var_a_b)/aTilde));
                /* value of boundary k */
                for (boundA = rowStart+1; boundA <= rowEnd; boundA++) {
                    mu_b_a = ((mu_a - boundA)/aTilde + mu_b);
                    muFloor = (int) mu_b_a;
                    /* the position of boundary k-1 can lie between 1 and boundary k and is constrained to lie within 2*numNotZero + 1 around its mean mu_b_a */
                    startVal = (muFloor-numNotZero < rowStart+1) ? rowStart+1 : muFloor - numNotZero;
                    stopVal = (muFloor+numNotZero > boundA) ? boundA : muFloor + numNotZero;
                    tmpVal = 0;
                    /* value of boundary k-1 */
                    for (boundB = startVal; boundB <= stopVal; boundB++) {
                        /* evaluate the conditional density; but only the part inside the exp function */
                        evalDens = -(boundB-mu_b_a)*(boundB-mu_b_a)*prec_a_aaTilde;
                        evalDens = hashTable[(int)(-evalDens*1000 + 0.5)];
                        tmpVal += factor*evalDens*alpha[boundB-1+numRows*(k-1)];
                    }
                    alpha[boundA-1+numRows*k] = tmpVal*prediction[boundA-1+numRows*k + predOffset];
                    cTmp += alpha[boundA-1+numRows*k];
                }
                /* normalize alpha */
                c[k] = cTmp; cInv = 1/cTmp;
                for (i=rowStart; i < rowEnd+1; i++) {
                    alpha[i+numRows*k] = alpha[i+numRows*k]*cInv;
                }
            }
            /* initialize beta (backward pass starts at the last boundary) */
            for (i=rowStart; i < rowEnd+1; i++) {
                idx = (numBounds-1)*numRows+i;
                beta[idx] = 1;
                gamma[idx + predOffset] = alpha[idx]*beta[idx];
            }
            for (k=numBounds-1; k > 0; k--) {
                /* precalculate the product of pObs*beta(z_{n+1}) */
                cInv = 1/c[k];
                for (i=rowStart; i < rowEnd+1; i++) {
                    prodTmp[i] = cInv*prediction[numRows*k+i + predOffset]*beta[numRows*k+i];
                }
                var_a_b = 1/prec[1 + (k-1)*2]; /* for the conditional density, variance is given by the inverse precision matrix */
                factor = 1/sqrt(2*3.1415926535897*var_a_b);
                /* calulate the number of elements larger than eps for each row */
                numNotZero = (int) ceil(sqrt(-log(eps*factor)*2*var_a_b));
                cTmp = 0;
                mu_b = mu[idxA[column] + numColumnsShape*(k-1)]; mu_a = mu[idxA[column] + numColumnsShape*k]; prec_a_b = prec[0 + (k-1)*2]; prec_a_a = prec[1 + (k-1)*2]*0.5;
                for (boundB = rowStart+1; boundB <= rowEnd; boundB++) {
                    mu_a_b = mu_a - var_a_b*prec_a_b*(boundB-mu_b);
                    muFloor = (int) mu_a_b;
                    /* position of boundary k (called here boundA) */
                    /* check for all possible values of boundB in this row: has to be at least boundA, can be at most numRows and we limit it to be not further away from muFloor than numNotZero */
                    startVal = (muFloor-numNotZero < boundB) ? boundB : muFloor - numNotZero;
                    stopVal = (muFloor+numNotZero > rowEnd) ? rowEnd : muFloor + numNotZero;
                    tmpVal = 0;
                    for (boundA = startVal; boundA <= stopVal; boundA++) {
                        evalDens = -(boundA-mu_a_b)*(boundA-mu_a_b)*prec_a_a;
                        evalDens = hashTable[(int)(-evalDens*1000 + 0.5)];
                        tmpVal += factor*evalDens*prodTmp[boundA-1];
                    }
                    idx = boundB-1+numRows*(k-1);
                    beta[idx] = tmpVal;
                    gamma[idx + predOffset] = beta[idx]*alpha[idx];
                }
            }
        }
        free(alpha); free(beta); free(prodTmp); free(prec); free(c);
    }
}
|
detector.c | #include "darknet.h"
#include <stdio.h>
#include <dirent.h>
#include <unistd.h>
#include <sys/stat.h>
struct stat st;
static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90};
/* Train a detection network described by cfgfile/weightfile on the data
 * listed in datacfg, optionally replicated across ngpus devices.
 * Periodically saves .backup and numbered .weights snapshots into the
 * configured backup directory and a *_final.weights file at the end. */
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
    list *options = read_data_cfg(datacfg);
    char *train_images = option_find_str(options, "train", "data/train.list");
    char *backup_directory = option_find_str(options, "backup", "/backup/");
    srand(time(0));
    char *base = basecfg(cfgfile);
    printf("%s\n", base);
    float avg_loss = -1;
    network **nets = calloc(ngpus, sizeof(network*));
    srand(time(0));
    int seed = rand();
    int i;
    /* One replica per GPU; the same seed before each load keeps the
     * replicas' random initialization identical. */
    for(i = 0; i < ngpus; ++i){
        srand(seed);
#ifdef GPU
        if(gpu_index >= 0){
            opencl_set_device(i);
        }
#endif
        nets[i] = load_network(cfgfile, weightfile, clear);
        /* Scale LR so the effective per-image rate is unchanged when the
         * batch is split across ngpus devices. */
        nets[i]->learning_rate *= ngpus;
    }
    srand(time(0));
    network *net = nets[0];
    int imgs = net->batch * net->subdivisions * ngpus;
#ifndef BENCHMARK
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
#endif
    data train, buffer;

    layer l = net->layers[net->n - 1];

    int classes = l.classes;
    float jitter = l.jitter;

    list *plist = get_paths(train_images);
    //int N = plist->size;
    char **paths = (char **)list_to_array(plist);

    load_args args = get_base_args(net);
    args.coords = l.coords;
    args.paths = paths;
    args.n = imgs;
    args.m = plist->size;
    args.classes = classes;
    args.jitter = jitter;
    args.num_boxes = l.max_boxes;
    args.d = &buffer;
    args.type = DETECTION_DATA;
    //args.type = INSTANCE_DATA;
    args.threads = 64;

    /* Asynchronous loader: the next batch is prepared while the current
     * one trains; `buffer` is the hand-off slot. */
    pthread_t load_thread = load_data(args);
#ifdef LOSS_ONLY
    double time=what_time_is_it_now();
#else
    double time;
#endif
    int count = 0;
    /* count is 0 here, so this always runs once: snapshot the starting
     * weights before any training step. */
    if(count == 0) {
#ifdef GPU
        if (gpu_index >= 0) {
            if (ngpus != 1) sync_nets(nets, ngpus, 0);
        }
#endif
        char buff[256];
        sprintf(buff, "%s/%s.start.conv.weights", backup_directory, base);
        save_weights(net, buff);
    }
    int max_size = ((net->w + net->h)/2);
    //while(i*imgs < N*120){
    while(get_current_batch(net) < net->max_batches){
        /* Multi-scale training: every 10th iteration pick a new random
         * input resolution (multiple of 32, at most max_size). */
        if(l.random && count++%10 == 0){
#if !defined(BENCHMARK) && !defined(LOSS_ONLY)
            printf("Resizing\n");
#endif
            int dim = max_size - ((rand() % 8) * 32);
#ifdef BENCHMARK
            dim = 608;
#endif
            if (get_current_batch(net)+200 > net->max_batches) dim = max_size;
            if (net->w < dim || net->h < dim) dim = max_size;
#if !defined(BENCHMARK) && !defined(LOSS_ONLY)
            printf("%d\n", dim);
#endif
            args.w = dim;
            args.h = dim;
            /* Discard the batch loaded at the old resolution and restart
             * the loader with the new size. */
            pthread_join(load_thread, 0);
            train = buffer;
            free_data(train);
            load_thread = load_data(args);
            #pragma omp parallel for
            for(i = 0; i < ngpus; ++i){
                resize_network(nets[i], dim, dim);
            }
            net = nets[0];
        }
#ifndef LOSS_ONLY
        time=what_time_is_it_now();
#endif
        /* Collect the prepared batch and immediately start loading the
         * next one. */
        pthread_join(load_thread, 0);
        train = buffer;
        load_thread = load_data(args);

        /*
        int k;
        for(k = 0; k < l.max_boxes; ++k){
        box b = float_to_box(train.y.vals[10] + 1 + k*5);
        if(!b.x) break;
        printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
        }
        */
        /*
        int zz;
        for(zz = 0; zz < train.X.cols; ++zz){
        image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]);
        int k;
        for(k = 0; k < l.max_boxes; ++k){
        box b = float_to_box(train.y.vals[zz] + k*5, 1);
        printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
        draw_bbox(im, b, 1, 1,0,0);
        }
        show_image(im, "truth11");
        cvWaitKey(0);
        save_image(im, "truth11");
        }
        */
#ifndef LOSS_ONLY
        printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);
#endif
#ifndef LOSS_ONLY
        time=what_time_is_it_now();
#endif
        float loss = 0;
#ifdef GPU
        if (gpu_index >= 0) {
            if (ngpus == 1) {
                loss = train_network(net, train);
            } else {
                /* Multi-GPU step: replicas are averaged every 4 batches. */
                loss = train_networks(nets, ngpus, train, 4);
            }
        }
        else {
            loss = train_network(net, train);
        }
#else
        loss = train_network(net, train);
#endif
        /* Exponential moving average of the loss for display. */
        if (avg_loss < 0) avg_loss = loss;
        avg_loss = avg_loss*.9 + loss*.1;

        i = get_current_batch(net);
#ifdef LOSS_ONLY
        printf("%lf\t%f\n", what_time_is_it_now()-time, loss);
#else
        printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
#endif
#ifdef GPU
        /* loss != loss is the NaN test. */
        if (loss != loss && gpu_index >= 0) {
            opencl_deinit(gpusg, ngpusg);
        }
#endif
        if(loss != loss) { printf("NaN LOSS detected! No possible to continue!\n"); exit(-7); }
        /* Rolling .backup snapshot every 100 batches. */
        if(i%100==0){
#ifdef GPU
            if (gpu_index >= 0) {
                if (ngpus != 1) sync_nets(nets, ngpus, 0);
            }
#endif
            char buff[256];
            sprintf(buff, "%s/%s.backup", backup_directory, base);
            save_weights(net, buff);
        }
        /* Numbered checkpoints: every 100 batches early on, then every
         * 10000 batches. */
        if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
            if (gpu_index >= 0) {
                if (ngpus != 1) sync_nets(nets, ngpus, 0);
            }
#endif
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
            save_weights(net, buff);
        }
        free_data(train);
#ifdef GPU_STATS
        opencl_dump_mem_stat();
#endif
#ifdef BENCHMARK
        break;
#endif
    }
#ifdef GPU
    if (gpu_index >= 0) {
        if (ngpus != 1) sync_nets(nets, ngpus, 0);
    }
#endif
    char buff[256];
    sprintf(buff, "%s/%s_final.weights", backup_directory, base);
    save_weights(net, buff);
    /* NOTE(review): free(plist) releases only the list header, not its
     * nodes, and the loaded networks in nets[] are not freed --
     * presumably acceptable since training ends the process; verify. */
    free(paths);
    free(plist);
    free(base);
    free(nets);
    free(options);
}
/* Extract the numeric COCO image id from a file path: parse the digits
 * after the last '_' if the name contains one, otherwise after the
 * last '/'. */
static int get_coco_image_id(char *filename)
{
    char *start = strrchr(filename, '_');
    if (!start) start = strrchr(filename, '/');
    return atoi(start + 1);
}
/* Append one COCO-JSON result line per (box, class) pair with a nonzero
 * score to fp.  Boxes are converted from center/size to corner form,
 * clipped to the image, and reported as [x, y, width, height]. */
static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h)
{
    int n, k;
    int image_id = get_coco_image_id(image_path);
    for (n = 0; n < num_boxes; ++n) {
        float xmin = dets[n].bbox.x - dets[n].bbox.w/2.;
        float xmax = dets[n].bbox.x + dets[n].bbox.w/2.;
        float ymin = dets[n].bbox.y - dets[n].bbox.h/2.;
        float ymax = dets[n].bbox.y + dets[n].bbox.h/2.;
        /* Clip the corners to the image rectangle. */
        xmin = (xmin < 0) ? 0 : xmin;
        ymin = (ymin < 0) ? 0 : ymin;
        xmax = (xmax > w) ? w : xmax;
        ymax = (ymax > h) ? h : ymax;
        float bx = xmin;
        float by = ymin;
        float bw = xmax - xmin;
        float bh = ymax - ymin;
        for (k = 0; k < classes; ++k) {
            if (!dets[n].prob[k]) continue;
            /* Network class index k maps to the official COCO category id. */
            fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[k], bx, by, bw, bh, dets[n].prob[k]);
        }
    }
}
/* Write Pascal-VOC style results: one "<id> <score> <xmin> <ymin>
 * <xmax> <ymax>" line per (box, class) pair with a nonzero score, into
 * the per-class file fps[class].  Coordinates are 1-based and clipped
 * to the image. */
void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h)
{
    int n, k;
    for (n = 0; n < total; ++n) {
        float xmin = dets[n].bbox.x - dets[n].bbox.w/2. + 1;
        float xmax = dets[n].bbox.x + dets[n].bbox.w/2. + 1;
        float ymin = dets[n].bbox.y - dets[n].bbox.h/2. + 1;
        float ymax = dets[n].bbox.y + dets[n].bbox.h/2. + 1;
        /* Clip to the 1-based image rectangle. */
        xmin = (xmin < 1) ? 1 : xmin;
        ymin = (ymin < 1) ? 1 : ymin;
        xmax = (xmax > w) ? w : xmax;
        ymax = (ymax > h) ? h : ymax;
        for (k = 0; k < classes; ++k) {
            if (!dets[n].prob[k]) continue;
            fprintf(fps[k], "%s %f %f %f %f %f\n", id, dets[n].prob[k],
                    xmin, ymin, xmax, ymax);
        }
    }
}
/* Write ImageNet-detection style results: one "<image-id> <class+1>
 * <score> <xmin> <ymin> <xmax> <ymax>" line per (box, class) pair with
 * a nonzero score.  Coordinates are clipped to the image. */
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h)
{
    int n, k;
    for (n = 0; n < total; ++n) {
        float xmin = dets[n].bbox.x - dets[n].bbox.w/2.;
        float xmax = dets[n].bbox.x + dets[n].bbox.w/2.;
        float ymin = dets[n].bbox.y - dets[n].bbox.h/2.;
        float ymax = dets[n].bbox.y + dets[n].bbox.h/2.;
        xmin = (xmin < 0) ? 0 : xmin;
        ymin = (ymin < 0) ? 0 : ymin;
        xmax = (xmax > w) ? w : xmax;
        ymax = (ymax > h) ? h : ymax;
        for (k = 0; k < classes; ++k) {
            if (!dets[n].prob[k]) continue;
            /* Class ids are reported 1-based in this format. */
            fprintf(fp, "%d %d %f %f %f %f %f\n", id, k+1, dets[n].prob[k],
                    xmin, ymin, xmax, ymax);
        }
    }
}
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    /* Validate a detector with horizontal-flip test-time augmentation.
     * Each image and its mirrored copy are stacked into a single batch-2
     * input, run through the network once, and the detections are written
     * in the format selected by the data cfg "eval" key (coco/imagenet/voc).
     *
     * Fix: NMS and the result printers now use the same detection count
     * returned by get_network_boxes(). Previously the count was stored in
     * `num` but NMS was called with an always-zero `nboxes`, so NMS never
     * ran; the printers then emitted un-suppressed boxes. */
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);

    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 2);  // batch of 2: original image + flipped copy
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);

    layer l = net->layers[net->n-1];
    int classes = l.classes;

    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        // VOC-style evaluation: one "comp4_det_test_<class>.txt" per class.
        if(!outfile) outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }

    int m = plist->size;
    int i=0;
    int t;

    float thresh = .005;  // very low threshold: keep nearly all boxes for mAP-style eval
    float nms = .45;

    int nthreads = 4;
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));

    image input = make_image(net->w, net->h, net->c*2);  // room for original + flipped planes

    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;

    // Prime the pipeline: start background loads for the first nthreads images.
    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        // Collect the batch that finished loading in the background.
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        // Kick off loading of the next batch while we run inference.
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            // First half of `input`: image as loaded; second half: mirrored.
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1);
            flip_image(val_resized[t]);
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1);

            network_predict(net, input.data);
            int w = val[t].w;
            int h = val[t].h;
            int nboxes = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
            if (nms) {
                if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
                else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
            }
            if (coco){
                print_cocos(fp, path, dets, nboxes, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, nboxes, classes, w, h);
            }
            free_detections(dets, nboxes);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        // Overwrite the trailing ",\n" of the last JSON record with the array close.
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    free_image(input);  // previously leaked
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    /* Run a detector over the "valid" image list from the data cfg and write
     * its detections to disk in the format selected by the "eval" key:
     * "coco" -> single JSON file, "imagenet" -> single txt file,
     * anything else -> VOC-style one-file-per-class. Image loading is
     * pipelined across `nthreads` background threads while the network
     * predicts on the previously loaded batch; the statement order of the
     * join / reload / predict phases below is what keeps that pipeline
     * correct, so do not reorder them. */
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);

    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);

    layer l = net->layers[net->n-1];
    int classes = l.classes;

    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;  // ImageNet detection task uses a fixed 200 classes
    } else {
        // VOC-style: one "comp4_det_test_<class>.txt" file per class.
        if(!outfile) outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }

    int m = plist->size;
    int i=0;
    int t;

    float thresh = .005;  // very low threshold: keep nearly all boxes for mAP-style eval
    float nms = .45;

    int nthreads = 4;
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));

    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;

    // Prime the pipeline: start background loads for the first nthreads images.
    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    // i runs nthreads past m so the final partially-filled batch is flushed.
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        // Phase 1: join the loader threads and take ownership of their images.
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        // Phase 2: immediately start loading the next batch in the background.
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        // Phase 3: predict on the batch collected in phase 1.
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            float *X = val_resized[t].data;
            network_predict(net, X);
            int w = val[t].w;
            int h = val[t].h;
            int nboxes = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
            if (nms) {
                if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
                else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
            }
            if (coco){
                print_cocos(fp, path, dets, nboxes, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, nboxes, classes, w, h);
            }
            free_detections(dets, nboxes);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        // Overwrite the trailing ",\n" of the last JSON record with the array close.
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
void validate_detector_recall(char *datacfg, char *cfgfile, char *weightfile)
{
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
//list *plist = get_paths("data/coco_val_5k.list");
list *options = read_data_cfg(datacfg);
char *test_images = option_find_str(options, "test", "data/test.list");
list *plist = get_paths(test_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int j, k;
int m = plist->size;
int i=0;
float thresh = .001;
float iou_thresh = .5;
float nms = .4;
int total = 0;
int correct = 0;
int proposals = 0;
float avg_iou = 0;
for(i = 0; i < m; ++i){
char *path = paths[i];
image orig = load_image_color(path, 0, 0);
image sized = resize_image(orig, net->w, net->h);
char *id = basecfg(path);
network_predict(net, sized.data);
int nboxes = 0;
detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes);
if (nms) {
if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
}
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int num_labels = 0;
box_label *truth = read_boxes(labelpath, &num_labels);
for(k = 0; k < nboxes; ++k){
if(dets[k].objectness > thresh){
++proposals;
}
}
for (j = 0; j < num_labels; ++j) {
++total;
box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
float best_iou = 0;
for(k = 0; k < l.w*l.h*l.n; ++k){
float iou = box_iou(dets[k].bbox, t);
if(dets[k].objectness > thresh && iou > best_iou){
best_iou = iou;
}
}
avg_iou += best_iou;
if(best_iou > iou_thresh){
++correct;
}
}
fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total);
free(id);
free_image(orig);
free_image(sized);
}
}
void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen)
{
    /* Run detection on a single image file (or prompt interactively for
     * paths when `filename` is NULL), draw the labelled boxes, and save the
     * annotated result to `outfile` (default "predictions").
     *
     * Cleanups: the two identical get_network_boxes() branches were merged;
     * strncpy() output is now explicitly NUL-terminated; NMS is skipped when
     * no detections were produced. */
    list *options = read_data_cfg(datacfg);
    char *name_list = option_find_str(options, "names", "data/names.list");
    char **names = get_labels(name_list);
    image **alphabet = load_alphabet();
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    srand(2222222);
    double time;
    char buff[256];
    char *input = buff;
    float nms=.45;
    while(1){
        if(filename){
            strncpy(input, filename, 256);
            input[255] = '\0';  // strncpy does not guarantee termination
        } else {
            printf("Enter Image Path: ");
            fflush(stdout);
            input = fgets(input, 256, stdin);
            if(!input) return;
            strtok(input, "\n");  // strip the trailing newline fgets keeps
        }
        image im = load_image_color(input,0,0);
        // Only letterbox when the image is not already at network size.
        int resize = im.w != net->w || im.h != net->h;
        image sized = resize ? letterbox_image(im, net->w, net->h) : im;
        layer l = net->layers[net->n-1];
        float *X = sized.data;
        time=what_time_is_it_now();
        // YOLOv4-style output layers use a dedicated forward pass.
        if (l.type == YOLO4) {
            network_predict_y4(net, X);
        } else if (l.type == DETECTION || l.type == REGION || l.type == YOLO) {
            network_predict(net, X);
        }
        printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time);
        int nboxes = 0;
        detection *dets = 0;
        // Box extraction is identical for all supported output layer types.
        if (l.type == DETECTION || l.type == REGION || l.type == YOLO || l.type == YOLO4) {
            dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
        }
        if (dets && nms) {
            if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
            else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
        }
        if (l.type == YOLO4) {
            draw_detections_v3(im, dets, nboxes, thresh, names, alphabet, l.classes, 0);
        } else if (l.type == DETECTION || l.type == REGION || l.type == YOLO) {
            draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes, 0);
        }
        free_detections(dets, nboxes);
        if(outfile){
            save_image(im, outfile);
        }
        else{
            save_image(im, "predictions");
#ifdef OPENCV
            show_image(im, "predictions", 0);
#endif
        }
        free_image(im);
        if (resize) free_image(sized);  // `sized` aliases `im` when not resized
        if (filename) break;
    }
}
int exists(const char *fname, const char* ext)
{
    // Report whether `fname` both contains `ext` as a substring and can be
    // opened for reading. Returns 1 on success, 0 otherwise.
    if (strstr(fname, ext) == NULL) return 0;
    FILE *probe = fopen(fname, "r");
    if (probe == NULL) return 0;
    fclose(probe);
    return 1;
}
int empty(char *dirname) {
    // Returns 1 when `dirname` is missing/unopenable or holds no entries
    // beyond "." and ".."; returns 0 when it contains at least one real entry.
    DIR *handle = opendir(dirname);
    if (handle == NULL) {
        return 1;  // not a directory or doesn't exist: treated as empty
    }
    int count = 0;
    struct dirent *entry;
    while ((entry = readdir(handle)) != NULL) {
        if (++count > 2) {
            break;  // more than "." and ".." means the dir is non-empty
        }
    }
    closedir(handle);
    return (count <= 2) ? 1 : 0;
}
void test_ddetector(char *datacfg, char *cfgfile, char *weightfile, char *in_dir, float thresh, float hier_thresh, char *out_dir)
{
    /* Directory-watching variant of test_detector: polls `in_dir` forever,
     * runs detection on every readable .jpg that appears, then deletes the
     * processed input file. Never returns.
     * NOTE(review): `st` (used with stat() below) is not declared in this
     * function — presumably a file-scope `struct stat st` elsewhere in the
     * file; confirm before refactoring.
     * NOTE(review): the strcpy/strcat path building below does not bound
     * lengths against fname/ffname/ffoname buffer sizes. */
    list *options = read_data_cfg(datacfg);
    char *name_list = option_find_str(options, "names", "data/names.list");
    char **names = get_labels(name_list);
    image **alphabet = load_alphabet();
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    srand(2222222);
    double time;
    char buff[256];
    char *input = buff;
    float nms=.45;
    char fname[256];
    char ffname[1024];
    char ffoname[1024];
    struct dirent *de = NULL;
    while(1) {
        // Busy-wait until the watched directory has at least one real entry.
        while (empty(in_dir)) {
            usleep(100);
        }
        DIR *dr = opendir(in_dir);
        while ((de = readdir(dr)) != NULL) {
            printf("%s\n", de->d_name);
            // Build "<in_dir>/<entry>" as the candidate input path.
            strcpy(fname, de->d_name);
            strcpy(ffname, in_dir);
            strcat(ffname, "/");
            strcat(ffname, fname);
            if (!exists(ffname, ".jpg")) continue;  // only readable .jpg files
            if (1) {
                // Build "<out_dir>/<entry without its 4-char extension>".
                strcpy(ffoname, out_dir);
                strcat(ffoname, "/");
                strcat(ffoname, fname);
                int len = strlen(ffoname) - 4;
                ffoname[len] = '\0';
                strncpy(input, ffname, 256);
            } else {
                // Dead branch kept from the interactive variant.
                printf("Enter Image Path: ");
                fflush(stdout);
                input = fgets(input, 256, stdin);
                if (!input) continue;
                strtok(input, "\n");
            }
            // Poll until the file size stops changing, i.e. the producer
            // has (probably) finished writing it.
            off_t size = 0;
            off_t offs = 0;
            do {
                offs = size;
                stat(input, &st);
                size = st.st_size;
                if (offs != size) usleep(10); else break;
            } while (1);
            image im = load_image_color(input, 0, 0);
            // Only letterbox when the image is not already at network size.
            int resize = im.w != net->w || im.h != net->h;
            image sized = resize ? letterbox_image(im, net->w, net->h) : im;
            //image sized = resize_image(im, net->w, net->h);
            //image sized2 = resize_max(im, net->w);
            //image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
            //resize_network(net, sized.w, sized.h);
            layer l = net->layers[net->n - 1];
            float *X = sized.data;
            time = what_time_is_it_now();
            network_predict(net, X);
            printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now() - time);
            int nboxes = 0;
            detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
            //printf("%d\n", nboxes);
            //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
            if (nms) {
                if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
                else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
            }
            draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes, 0);
            free_detections(dets, nboxes);
            free_image(im);
            if (resize) free_image(sized);
            // if (filename) break;
            remove(input);  // consume the processed input file
        }
        closedir(dr);
    }
}
/*
void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
float nms = .45;
while(1){
image in = get_image_from_stream_cv(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
float *X = in_s.data;
network_predict(net, X);
int nboxes = 0;
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) {
if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
}
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int left = b.x-b.w/2.;
int top = b.y-b.h/2.;
censor_image(in, left, top, b.w, b.h);
}
}
show_image(in, base);
cvWaitKey(10);
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream_cv(cap);
free_image(in);
}
}
#endif
}
void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
int count = 0;
float nms = .45;
while(1){
image in = get_image_from_stream_cv(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
show_image(in, base);
int nboxes = 0;
float *X = in_s.data;
network_predict(net, X);
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) {
if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
}
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h;
int dx = b.x*in.w-size/2.;
int dy = b.y*in.h-size/2.;
image bim = crop_image(in, dx, dy, size, size);
char buff[2048];
sprintf(buff, "results/extract/%07d", count);
++count;
save_image(bim, buff);
free_image(bim);
}
}
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream_cv(cap);
free_image(in);
}
}
#endif
}
*/
/*
void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets)
{
network_predict_image(net, im);
layer l = net->layers[net->n-1];
int nboxes = num_boxes(net);
fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets);
if (nms) {
if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
}
}
*/
void run_detector(int argc, char **argv)
{
    /* CLI dispatcher for the detector subcommands:
     *   detector test|train|valid|valid2|recall|demo <datacfg> <cfg> [weights] [image]
     * Parses optional flags, then forwards to the matching routine.
     *
     * Fixes: the required-argument check was `argc < 4`, which let
     * argv[4] (the network cfg, always dereferenced below) be NULL and
     * crash later — it is now `argc < 5`; the -gpus parser no longer
     * computes strchr(...) == NULL plus 1 (undefined behavior) after the
     * last GPU id. */
    char *prefix = find_char_arg(argc, argv, "-prefix", 0);
    float thresh = find_float_arg(argc, argv, "-thresh", .5);
    float hier_thresh = find_float_arg(argc, argv, "-hier", .5);
    int cam_index = find_int_arg(argc, argv, "-c", 0);
    int frame_skip = find_int_arg(argc, argv, "-s", 0);
    int avg = find_int_arg(argc, argv, "-avg", 3);
    if(argc < 5){
        fprintf(stderr, "usage: %s %s [train/test/valid] [datacfg] [cfg] [weights (optional)]\n", argv[0], argv[1]);
        return;
    }
    char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
    char *outfile = find_char_arg(argc, argv, "-out", 0);
    int *gpus = 0;
    int gpu = 0;
    int ngpus = 0;
    if(gpu_list){
        printf("%s\n", gpu_list);
        int len = strlen(gpu_list);
        ngpus = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (gpu_list[i] == ',') ++ngpus;  // ids are comma-separated
        }
        gpus = calloc(ngpus, sizeof(int));
        for(i = 0; i < ngpus; ++i){
            gpus[i] = atoi(gpu_list);
            char *comma = strchr(gpu_list, ',');
            if (!comma) break;        // last id: avoid NULL + 1
            gpu_list = comma + 1;
        }
    } else {
        // No -gpus flag: fall back to the single globally selected GPU.
        gpu = gpu_index;
        gpus = &gpu;
        ngpus = 1;
    }

    int clear = find_arg(argc, argv, "-clear");
    int fullscreen = find_arg(argc, argv, "-fullscreen");
    int width = find_int_arg(argc, argv, "-w", 0);
    int height = find_int_arg(argc, argv, "-h", 0);
    int fps = find_int_arg(argc, argv, "-fps", 0);
    //int class = find_int_arg(argc, argv, "-class", 0);

    char *datacfg = argv[3];
    char *cfg = argv[4];
    char *weights = (argc > 5) ? argv[5] : 0;
    char *filename = (argc > 6) ? argv[6]: 0;
    if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen);
    else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);
    else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "recall")) validate_detector_recall(datacfg, cfg, weights);
    else if(0==strcmp(argv[2], "demo")) {
        list *options = read_data_cfg(datacfg);
        int classes = option_find_int(options, "classes", 20);
        char *name_list = option_find_str(options, "names", "data/names.list");
        char **names = get_labels(name_list);
        demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen);
    }
    //else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
    //else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
}
|
GB_unop__ainv_uint8_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_uint8_uint8)
// op(A') function: GB (_unop_tran__ainv_uint8_uint8)
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = aij
// unaryop: cij = -aij
// type of the entries of the input matrix A
#define GB_ATYPE \
uint8_t

// type of the entries of the output matrix C
#define GB_CTYPE \
uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
// note: -x on uint8_t is the additive inverse modulo 256 (unsigned wraparound)
#define GB_OP(z, x) \
z = -x ;

// casting
#define GB_CAST(z, aij) \
uint8_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = aij ; \
Cx [pC] = -z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply z = -aij (additive inverse, modulo 256 for uint8) to every entry of
// A, writing into Cx. Handles both the full/sparse case (Ab == NULL) and the
// bitmap case (Ab marks which positions hold entries).
GrB_Info GB (_unop_apply__ainv_uint8_uint8)
(
uint8_t *Cx,            // Cx and Ax may be aliased
const uint8_t *Ax,
const int8_t *restrict Ab,  // A->b if A is bitmap
int64_t anz,            // number of entry slots to process
int nthreads            // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full/sparse case: every slot holds an entry
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
uint8_t z = aij ;
Cx [p] = -z ;   // unsigned negation wraps modulo 256
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;   // skip slots with no entry present
uint8_t aij = Ax [p] ;
uint8_t z = aij ;
Cx [p] = -z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = -(A') for uint8: transpose A and apply the additive-inverse operator.
// The actual kernel body is supplied by the shared template file
// "GB_unop_transpose.c", which consumes the GB_* macros defined above.
GrB_Info GB (_unop_tran__ainv_uint8_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,   // NOTE(review): presumably per-thread workspaces — see GB_unop_transpose.c
const int64_t *restrict A_slice, // NOTE(review): presumably the partition of A across tasks — confirm in template
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.